Dataset columns:
  id_within_dataset     : int64   (1 to 55.5k)
  snippet               : string  (lengths 19 to 14.2k)
  tokens                : list    (lengths 6 to 1.63k)
  nl                    : string  (lengths 6 to 352)
  split_within_dataset  : string  (1 value)
  is_duplicated         : bool    (2 classes)
17,517
def load_pem(contents, pem_marker):
    (pem_start, pem_end) = _markers(pem_marker)
    pem_lines = []
    in_pem_part = False
    for line in contents.splitlines():
        line = line.strip()
        if (not line):
            continue
        if (line == pem_start):
            if in_pem_part:
                raise ValueError(('Seen start marker "%s" twice' % pem_start))
            in_pem_part = True
            continue
        if (not in_pem_part):
            continue
        if (in_pem_part and (line == pem_end)):
            in_pem_part = False
            break
        if (b(':') in line):
            continue
        pem_lines.append(line)
    if (not pem_lines):
        raise ValueError(('No PEM start marker "%s" found' % pem_start))
    if in_pem_part:
        raise ValueError(('No PEM end marker "%s" found' % pem_end))
    pem = b('').join(pem_lines)
    return base64.decodestring(pem)
[ "def", "load_pem", "(", "contents", ",", "pem_marker", ")", ":", "(", "pem_start", ",", "pem_end", ")", "=", "_markers", "(", "pem_marker", ")", "pem_lines", "=", "[", "]", "in_pem_part", "=", "False", "for", "line", "in", "contents", ".", "splitlines", ...
loads a pem file .
train
false
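A minimal standalone sketch of the same PEM-body extraction, for readers who want to run it: Python 3 is assumed (base64.decodebytes replaces the deprecated decodestring used in the snippet), and the BEGIN/END marker formatting is an assumption about what _markers produces.

import base64

def extract_pem_body(contents: bytes, marker: str) -> bytes:
    # Collect the base64 lines between the BEGIN/END markers and decode them.
    start = ('-----BEGIN %s-----' % marker).encode('ascii')
    end = ('-----END %s-----' % marker).encode('ascii')
    lines, in_pem = [], False
    for line in contents.splitlines():
        line = line.strip()
        if line == start:
            in_pem = True
        elif line == end:
            break
        elif in_pem and b':' not in line:  # skip header lines like "Proc-Type:"
            lines.append(line)
    return base64.decodebytes(b''.join(lines))

pem = b'-----BEGIN RSA PRIVATE KEY-----\nYWJj\n-----END RSA PRIVATE KEY-----\n'
assert extract_pem_body(pem, 'RSA PRIVATE KEY') == b'abc'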
17,518
def qual_missing(id_, seq, qual): return (qual is None)
[ "def", "qual_missing", "(", "id_", ",", "seq", ",", "qual", ")", ":", "return", "(", "qual", "is", "None", ")" ]
returns true if qual is none .
train
false
17,519
def getFloatDefaultByDictionary(defaultFloat, dictionary, key):
    evaluatedFloat = None
    if (key in dictionary):
        evaluatedFloat = getFloatFromValue(dictionary[key])
    if (evaluatedFloat == None):
        return defaultFloat
    return evaluatedFloat
[ "def", "getFloatDefaultByDictionary", "(", "defaultFloat", ",", "dictionary", ",", "key", ")", ":", "evaluatedFloat", "=", "None", "if", "(", "key", "in", "dictionary", ")", ":", "evaluatedFloat", "=", "getFloatFromValue", "(", "dictionary", "[", "key", "]", "...
get the value as a float .
train
false
17,520
def teardown_test_db():
    from sickbeard.db import db_cons
    for connection in db_cons:
        db_cons[connection].commit()
[ "def", "teardown_test_db", "(", ")", ":", "from", "sickbeard", ".", "db", "import", "db_cons", "for", "connection", "in", "db_cons", ":", "db_cons", "[", "connection", "]", ".", "commit", "(", ")" ]
tear down the test database .
train
false
17,522
def nonzero(a): return a.nonzero()
[ "def", "nonzero", "(", "a", ")", ":", "return", "a", ".", "nonzero", "(", ")" ]
return the indices of the elements that are non-zero .
train
false
17,525
def check_stoplimit_prices(price, label):
    try:
        if (not isfinite(price)):
            raise BadOrderParameters(msg='Attempted to place an order with a {} price of {}.'.format(label, price))
    except TypeError:
        raise BadOrderParameters(msg='Attempted to place an order with a {} price of {}.'.format(label, type(price)))
    if (price < 0):
        raise BadOrderParameters(msg="Can't place a {} order with a negative price.".format(label))
[ "def", "check_stoplimit_prices", "(", "price", ",", "label", ")", ":", "try", ":", "if", "(", "not", "isfinite", "(", "price", ")", ")", ":", "raise", "BadOrderParameters", "(", "msg", "=", "'Attempted to place an order with a {} price of {}.'", ".", "format", "...
check to make sure the stop/limit prices are reasonable and raise a badorderparameters exception if not .
train
true
17,526
def do_mark_unsafe(value): return text_type(value)
[ "def", "do_mark_unsafe", "(", "value", ")", ":", "return", "text_type", "(", "value", ")" ]
mark a value as unsafe .
train
false
17,527
def add_logs(logx, logy):
    if (logx < (logy + _ADD_LOGS_MAX_DIFF)):
        return logy
    if (logy < (logx + _ADD_LOGS_MAX_DIFF)):
        return logx
    base = min(logx, logy)
    return (base + math.log(((2 ** (logx - base)) + (2 ** (logy - base))), 2))
[ "def", "add_logs", "(", "logx", ",", "logy", ")", ":", "if", "(", "logx", "<", "(", "logy", "+", "_ADD_LOGS_MAX_DIFF", ")", ")", ":", "return", "logy", "if", "(", "logy", "<", "(", "logx", "+", "_ADD_LOGS_MAX_DIFF", ")", ")", ":", "return", "logx", ...
given two numbers logx = *log(x)* and logy = *log(y)* .
train
false
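A runnable sketch of the base-2 log-sum trick the snippet implements; the cutoff value for _ADD_LOGS_MAX_DIFF is not shown in the row, so the -30 here is an assumption.

import math

_ADD_LOGS_MAX_DIFF = -30  # assumed negative cutoff; not defined in the snippet above

def add_logs(logx, logy):
    if logx < logy + _ADD_LOGS_MAX_DIFF:
        return logy  # x is negligible next to y
    if logy < logx + _ADD_LOGS_MAX_DIFF:
        return logx
    base = min(logx, logy)
    return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2)

# log2(4 + 4) == 3, i.e. add_logs(log2(4), log2(4)) == log2(8)
assert add_logs(2.0, 2.0) == 3.0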
17,528
def check_file_size(f, max_allowed_size):
    if (f.size > max_allowed_size):
        message = (_lazy(u'"%s" is too large (%sKB), the limit is %sKB') % (f.name, (f.size >> 10), (max_allowed_size >> 10)))
        raise FileTooLargeError(message)
[ "def", "check_file_size", "(", "f", ",", "max_allowed_size", ")", ":", "if", "(", "f", ".", "size", ">", "max_allowed_size", ")", ":", "message", "=", "(", "_lazy", "(", "u'\"%s\" is too large (%sKB), the limit is %sKB'", ")", "%", "(", "f", ".", "name", ","...
check the file size of f is less than max_allowed_size raise filetoolargeerror if the check fails .
train
false
17,529
def FlagCxx14Features(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    include = Match('\\s*#\\s*include\\s+[<"]([^<"]+)[">]', line)
    if (include and (include.group(1) in ('scoped_allocator', 'shared_mutex'))):
        error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.' % include.group(1)))
[ "def", "FlagCxx14Features", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "include", "=", "Match", "(", "'\\\\s*#\\\\s*include\\\\s+[<\"]([^<\"]+)[\">]'", ",", "line", ...
flag those c++14 features that we restrict .
train
true
17,530
def entry_list_list(options):
    with Session() as session:
        try:
            entry_list = get_list_by_exact_name(options.list_name, session=session)
        except NoResultFound:
            console(u'Could not find entry list with name {}'.format(options.list_name))
            return
        header = [u'#', u'Title', u'# of fields']
        table_data = [header]
        for entry in get_entries_by_list_id(entry_list.id, order_by=u'added', descending=True, session=session):
            table_data.append([entry.id, entry.title, len(entry.entry)])
    try:
        table = TerminalTable(options.table_type, table_data)
        console(table.output)
    except TerminalTableError as e:
        console((u'ERROR: %s' % str(e)))
[ "def", "entry_list_list", "(", "options", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "entry_list", "=", "get_list_by_exact_name", "(", "options", ".", "list_name", ",", "session", "=", "session", ")", "except", "NoResultFound", ...
list entry list .
train
false
17,531
def toResponse(stanza, stanzaType=None):
    toAddr = stanza.getAttribute('from')
    fromAddr = stanza.getAttribute('to')
    stanzaID = stanza.getAttribute('id')
    response = domish.Element((None, stanza.name))
    if toAddr:
        response['to'] = toAddr
    if fromAddr:
        response['from'] = fromAddr
    if stanzaID:
        response['id'] = stanzaID
    if stanzaType:
        response['type'] = stanzaType
    return response
[ "def", "toResponse", "(", "stanza", ",", "stanzaType", "=", "None", ")", ":", "toAddr", "=", "stanza", ".", "getAttribute", "(", "'from'", ")", "fromAddr", "=", "stanza", ".", "getAttribute", "(", "'to'", ")", "stanzaID", "=", "stanza", ".", "getAttribute"...
create a response stanza from another stanza .
train
false
17,532
def _make_subclasshook(cls):
    if isinstance(cls.__extra__, abc.ABCMeta):
        def __extrahook__(cls, subclass):
            _valid_for_check(cls)
            res = cls.__extra__.__subclasshook__(subclass)
            if (res is not NotImplemented):
                return res
            if (cls.__extra__ in getattr(subclass, u'__mro__', ())):
                return True
            for scls in cls.__extra__.__subclasses__():
                if isinstance(scls, GenericMeta):
                    continue
                if issubclass(subclass, scls):
                    return True
            return NotImplemented
    else:
        def __extrahook__(cls, subclass):
            _valid_for_check(cls)
            if (cls.__extra__ and issubclass(subclass, cls.__extra__)):
                return True
            return NotImplemented
    return classmethod(__extrahook__)
[ "def", "_make_subclasshook", "(", "cls", ")", ":", "if", "isinstance", "(", "cls", ".", "__extra__", ",", "abc", ".", "ABCMeta", ")", ":", "def", "__extrahook__", "(", "cls", ",", "subclass", ")", ":", "_valid_for_check", "(", "cls", ")", "res", "=", "...
construct a __subclasshook__ callable that incorporates the associated __extra__ class in subclass checks performed against cls .
train
true
17,533
def test_spinner_non_unicode_console():
    stream = FakeTTY(u'ascii')
    chars = console.Spinner._default_unicode_chars
    with console.Spinner(u'Reticulating splines', file=stream, chars=chars) as s:
        next(s)
[ "def", "test_spinner_non_unicode_console", "(", ")", ":", "stream", "=", "FakeTTY", "(", "u'ascii'", ")", "chars", "=", "console", ".", "Spinner", ".", "_default_unicode_chars", "with", "console", ".", "Spinner", "(", "u'Reticulating splines'", ",", "file", "=", ...
regression test for #1760 ensures that the spinner can fall go into fallback mode when using the unicode spinner on a terminal whose default encoding cannot encode the unicode characters .
train
false
17,534
def index_to_slice(idx): return slice(idx, (idx + 1), None)
[ "def", "index_to_slice", "(", "idx", ")", ":", "return", "slice", "(", "idx", ",", "(", "idx", "+", "1", ")", ",", "None", ")" ]
converts an index to a slice .
train
false
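A quick illustration of why the slice form matters: indexing drops a dimension while a one-element slice keeps it. Self-contained sketch, re-deriving the one-liner above.

def index_to_slice(idx):
    # slice(idx, idx + 1) selects the same element but keeps the axis.
    return slice(idx, idx + 1, None)

row = [10, 20, 30]
assert row[1] == 20                     # plain index: scalar
assert row[index_to_slice(1)] == [20]   # slice: one-element list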
17,535
def contentfilter(fsname, pattern):
    if (pattern is None):
        return True
    try:
        f = open(fsname)
        prog = re.compile(pattern)
        for line in f:
            if prog.match(line):
                f.close()
                return True
        f.close()
    except:
        pass
    return False
[ "def", "contentfilter", "(", "fsname", ",", "pattern", ")", ":", "if", "(", "pattern", "is", "None", ")", ":", "return", "True", "try", ":", "f", "=", "open", "(", "fsname", ")", "prog", "=", "re", ".", "compile", "(", "pattern", ")", "for", "line"...
filter files which contain the given expression .
train
false
17,537
def update_system(version='', ruby=None, runas=None, gem_bin=None): return _gem(['update', '--system', version], ruby, gem_bin=gem_bin, runas=runas)
[ "def", "update_system", "(", "version", "=", "''", ",", "ruby", "=", "None", ",", "runas", "=", "None", ",", "gem_bin", "=", "None", ")", ":", "return", "_gem", "(", "[", "'update'", ",", "'--system'", ",", "version", "]", ",", "ruby", ",", "gem_bin"...
update rubygems .
train
true
17,538
def _coerceToFilesystemEncoding(path, newpath, encoding=None):
    if (type(path) == bytes):
        return _asFilesystemBytes(newpath, encoding=encoding)
    else:
        return _asFilesystemText(newpath, encoding=encoding)
[ "def", "_coerceToFilesystemEncoding", "(", "path", ",", "newpath", ",", "encoding", "=", "None", ")", ":", "if", "(", "type", "(", "path", ")", "==", "bytes", ")", ":", "return", "_asFilesystemBytes", "(", "newpath", ",", "encoding", "=", "encoding", ")", ...
return a c{newpath} that is suitable for joining to c{path} .
train
false
17,539
def _sanitize_index(data, index, copy=False):
    if (index is None):
        return data
    if (len(data) != len(index)):
        raise ValueError('Length of values does not match length of index')
    if isinstance(data, PeriodIndex):
        data = data.asobject
    elif isinstance(data, DatetimeIndex):
        data = data._to_embed(keep_tz=True)
        if copy:
            data = data.copy()
    elif isinstance(data, np.ndarray):
        if (data.dtype.kind in ['M', 'm']):
            data = _sanitize_array(data, index, copy=copy)
    return data
[ "def", "_sanitize_index", "(", "data", ",", "index", ",", "copy", "=", "False", ")", ":", "if", "(", "index", "is", "None", ")", ":", "return", "data", "if", "(", "len", "(", "data", ")", "!=", "len", "(", "index", ")", ")", ":", "raise", "ValueE...
sanitize an index type to return an ndarray of the underlying .
train
false
17,540
def addCylinderOutputByEndStart(endZ, inradiusComplex, outputs, sides, start, topOverBottom=1.0):
    inradius = Vector3(inradiusComplex.real, inradiusComplex.imag, (0.5 * abs((endZ - start.z))))
    cylinderOutput = getGeometryOutput(inradius, sides, topOverBottom)
    vertexes = matrix.getVertexes(cylinderOutput)
    if (endZ < start.z):
        for vertex in vertexes:
            vertex.z = (- vertex.z)
    translation = Vector3(start.x, start.y, (inradius.z + min(start.z, endZ)))
    euclidean.translateVector3Path(vertexes, translation)
    outputs.append(cylinderOutput)
[ "def", "addCylinderOutputByEndStart", "(", "endZ", ",", "inradiusComplex", ",", "outputs", ",", "sides", ",", "start", ",", "topOverBottom", "=", "1.0", ")", ":", "inradius", "=", "Vector3", "(", "inradiusComplex", ".", "real", ",", "inradiusComplex", ".", "im...
add cylinder triangle mesh by endz .
train
false
17,541
def get_hub():
    try:
        import select
        if hasattr(select, 'poll'):
            return 'poll'
        return 'selects'
    except ImportError:
        return None
[ "def", "get_hub", "(", ")", ":", "try", ":", "import", "select", "if", "hasattr", "(", "select", ",", "'poll'", ")", ":", "return", "'poll'", "return", "'selects'", "except", "ImportError", ":", "return", "None" ]
return the hub for the current thread .
train
false
17,542
def test_empty_lists(Chart):
    chart = Chart()
    chart.add('A', [1, 2])
    chart.add('B', [])
    if (not chart._dual):
        chart.x_labels = ('red', 'green', 'blue')
    q = chart.render_pyquery()
    assert (len(q('.legend')) == 2)
[ "def", "test_empty_lists", "(", "Chart", ")", ":", "chart", "=", "Chart", "(", ")", "chart", ".", "add", "(", "'A'", ",", "[", "1", ",", "2", "]", ")", "chart", ".", "add", "(", "'B'", ",", "[", "]", ")", "if", "(", "not", "chart", ".", "_dua...
test chart rendering with an empty serie .
train
false
17,543
def pci_device_update(context, node_id, address, value): return IMPL.pci_device_update(context, node_id, address, value)
[ "def", "pci_device_update", "(", "context", ",", "node_id", ",", "address", ",", "value", ")", ":", "return", "IMPL", ".", "pci_device_update", "(", "context", ",", "node_id", ",", "address", ",", "value", ")" ]
update a pci device .
train
false
17,546
def pull_raw(url, name, verify=False): return _pull_image('raw', url, name, verify=verify)
[ "def", "pull_raw", "(", "url", ",", "name", ",", "verify", "=", "False", ")", ":", "return", "_pull_image", "(", "'raw'", ",", "url", ",", "name", ",", "verify", "=", "verify", ")" ]
execute a machinectl pull-raw to download a .
train
true
17,548
def _item_to_job(iterator, resource): return iterator.client.job_from_resource(resource)
[ "def", "_item_to_job", "(", "iterator", ",", "resource", ")", ":", "return", "iterator", ".", "client", ".", "job_from_resource", "(", "resource", ")" ]
convert a json job to the native object .
train
false
17,549
def a1_to_rowcol(label):
    m = CELL_ADDR_RE.match(label)
    if m:
        column_label = m.group(1).upper()
        row = int(m.group(2))
        col = 0
        for (i, c) in enumerate(reversed(column_label)):
            col += ((ord(c) - MAGIC_NUMBER) * (26 ** i))
    else:
        raise IncorrectCellLabel(label)
    return (row, col)
[ "def", "a1_to_rowcol", "(", "label", ")", ":", "m", "=", "CELL_ADDR_RE", ".", "match", "(", "label", ")", "if", "m", ":", "column_label", "=", "m", ".", "group", "(", "1", ")", ".", "upper", "(", ")", "row", "=", "int", "(", "m", ".", "group", ...
translates a cells address in a1 notation to a tuple of integers .
train
true
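A self-contained sketch of the base-26 conversion above; CELL_ADDR_RE and MAGIC_NUMBER are module-level names not shown in the row, so the regex and the value 64 (ord('A') - 1) here are assumptions about their definitions.

import re

CELL_ADDR_RE = re.compile(r'([A-Za-z]+)(\d+)')  # assumed equivalent
MAGIC_NUMBER = 64  # assumed: ord('A') - 1, so 'A' -> 1, 'Z' -> 26

def a1_to_rowcol(label):
    m = CELL_ADDR_RE.match(label)
    if not m:
        raise ValueError(label)
    col = 0
    for i, c in enumerate(reversed(m.group(1).upper())):
        col += (ord(c) - MAGIC_NUMBER) * (26 ** i)
    return int(m.group(2)), col

assert a1_to_rowcol('B1') == (1, 2)
assert a1_to_rowcol('AA10') == (10, 27)  # 'AA' = 1*26 + 1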
17,550
def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):
    ndim = len(old_chunks)
    chunks = list(old_chunks)
    for dim in range(ndim):
        graph_size = estimate_graph_size(chunks, new_chunks)
        if (graph_size > graph_size_limit):
            break
        if (len(old_chunks[dim]) > len(new_chunks[dim])):
            continue
        max_number = int(((len(old_chunks[dim]) * graph_size_limit) / graph_size))
        c = merge_to_number(new_chunks[dim], max_number)
        assert (len(c) <= max_number)
        if ((len(c) >= len(old_chunks[dim])) and (max(c) <= max(old_chunks[dim]))):
            chunks[dim] = c
    return tuple(chunks)
[ "def", "find_split_rechunk", "(", "old_chunks", ",", "new_chunks", ",", "graph_size_limit", ")", ":", "ndim", "=", "len", "(", "old_chunks", ")", "chunks", "=", "list", "(", "old_chunks", ")", "for", "dim", "in", "range", "(", "ndim", ")", ":", "graph_size...
find an intermediate rechunk that would split some chunks to get us nearer *new_chunks* .
train
false
17,551
def test_filelock_passes(tmpdir):
    package_name = 'conda_file1'
    tmpfile = join(tmpdir.strpath, package_name)
    with FileLock(tmpfile) as lock:
        path = basename(lock.lock_file_path)
        assert (tmpdir.join(path).exists() and tmpdir.join(path).isfile())
    assert (not tmpdir.join(path).exists())
[ "def", "test_filelock_passes", "(", "tmpdir", ")", ":", "package_name", "=", "'conda_file1'", "tmpfile", "=", "join", "(", "tmpdir", ".", "strpath", ",", "package_name", ")", "with", "FileLock", "(", "tmpfile", ")", "as", "lock", ":", "path", "=", "basename"...
normal test on file lock .
train
false
17,553
def lz4_decode(payload):
    ctx = lz4f.createDecompContext()
    data = lz4f.decompressFrame(payload, ctx)
    if (data['next'] != 0):
        raise RuntimeError('lz4f unable to decompress full payload')
    return data['decomp']
[ "def", "lz4_decode", "(", "payload", ")", ":", "ctx", "=", "lz4f", ".", "createDecompContext", "(", ")", "data", "=", "lz4f", ".", "decompressFrame", "(", "payload", ",", "ctx", ")", "if", "(", "data", "[", "'next'", "]", "!=", "0", ")", ":", "raise"...
decode payload using interoperable lz4 framing .
train
false
17,556
def extract_gcs_tokens(full_object_name):
    bucket_name = None
    object_name = None
    tokens = full_object_name.split('/')
    if (len(tokens) < 3):
        logging.error("Malformed GCS path '{0}'. Aborting GCS operation.".format(full_object_name))
        return (bucket_name, object_name)
    bucket_name = tokens[2]
    object_name = ''
    for token in tokens[3:(-1)]:
        object_name += (token + '/')
    object_name += tokens[(-1)]
    return (bucket_name, object_name)
[ "def", "extract_gcs_tokens", "(", "full_object_name", ")", ":", "bucket_name", "=", "None", "object_name", "=", "None", "tokens", "=", "full_object_name", ".", "split", "(", "'/'", ")", "if", "(", "len", "(", "tokens", ")", "<", "3", ")", ":", "logging", ...
extracts the bucket and object name from a full gcs path .
train
false
17,557
def RemoveFlags(flag_values=FLAGS):
    for flag_name in NamesOfDefinedFlags():
        module_bar.RemoveOneFlag(flag_name, flag_values=flag_values)
    module_bar.RemoveFlags(flag_values=flag_values)
[ "def", "RemoveFlags", "(", "flag_values", "=", "FLAGS", ")", ":", "for", "flag_name", "in", "NamesOfDefinedFlags", "(", ")", ":", "module_bar", ".", "RemoveOneFlag", "(", "flag_name", ",", "flag_values", "=", "flag_values", ")", "module_bar", ".", "RemoveFlags",...
deletes the flag definitions done by the above defineflags() .
train
false
17,558
def unpack_bitstring(string):
    byte_count = len(string)
    bits = []
    for byte in range(byte_count):
        value = ord(string[byte])
        for _ in range(8):
            bits.append(((value & 1) == 1))
            value >>= 1
    return bits
[ "def", "unpack_bitstring", "(", "string", ")", ":", "byte_count", "=", "len", "(", "string", ")", "bits", "=", "[", "]", "for", "byte", "in", "range", "(", "byte_count", ")", ":", "value", "=", "ord", "(", "string", "[", "byte", "]", ")", "for", "_...
creates bit array out of a string .
train
false
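The snippet above is Python 2 (ord() on a string index). A Python 3 sketch of the same LSB-first unpacking, runnable as-is:

def unpack_bitstring(data: bytes):
    # Least-significant bit of each byte comes first, as in the snippet above.
    bits = []
    for value in data:          # iterating bytes yields ints in Python 3
        for _ in range(8):
            bits.append((value & 1) == 1)
            value >>= 1
    return bits

# 0x05 == 0b00000101 -> bits 0 and 2 set, LSB first
assert unpack_bitstring(b'\x05')[:4] == [True, False, True, False]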
17,560
def require_user(handler):
    def test_login(self, **kwargs):
        'Checks if the user for the current session is logged in.'
        if (not self.user_id):
            self.redirect(current_user_services.create_login_url(self.request.uri))
            return
        return handler(self, **kwargs)
    return test_login
[ "def", "require_user", "(", "handler", ")", ":", "def", "test_login", "(", "self", ",", "**", "kwargs", ")", ":", "if", "(", "not", "self", ".", "user_id", ")", ":", "self", ".", "redirect", "(", "current_user_services", ".", "create_login_url", "(", "se...
decorator that checks if a user is associated to the current session .
train
false
17,561
def slip_reader(port):
    partial_packet = None
    in_escape = False
    while True:
        waiting = port.inWaiting()
        read_bytes = port.read((1 if (waiting == 0) else waiting))
        if (read_bytes == ''):
            raise FatalError(('Timed out waiting for packet %s' % ('header' if (partial_packet is None) else 'content')))
        for b in read_bytes:
            if (type(b) is int):
                b = bytes([b])
            if (partial_packet is None):
                if (b == '\xc0'):
                    partial_packet = ''
                else:
                    raise FatalError(('Invalid head of packet (%r)' % b))
            elif in_escape:
                in_escape = False
                if (b == '\xdc'):
                    partial_packet += '\xc0'
                elif (b == '\xdd'):
                    partial_packet += '\xdb'
                else:
                    raise FatalError(('Invalid SLIP escape (%r%r)' % ('\xdb', b)))
            elif (b == '\xdb'):
                in_escape = True
            elif (b == '\xc0'):
                (yield partial_packet)
                partial_packet = None
            else:
                partial_packet += b
[ "def", "slip_reader", "(", "port", ")", ":", "partial_packet", "=", "None", "in_escape", "=", "False", "while", "True", ":", "waiting", "=", "port", ".", "inWaiting", "(", ")", "read_bytes", "=", "port", ".", "read", "(", "(", "1", "if", "(", "waiting"...
generator to read slip packets from a serial port .
train
false
17,562
def test_brainvision_data_partially_disabled_hw_filters():
    with warnings.catch_warnings(record=True) as w:
        raw = _test_raw_reader(read_raw_brainvision, vhdr_fname=vhdr_partially_disabled_hw_filter_path, montage=montage, eog=eog)
    trigger_warning = [('parse triggers that' in str(ww.message)) for ww in w]
    lowpass_warning = [('different lowpass filters' in str(ww.message)) for ww in w]
    highpass_warning = [('different highpass filters' in str(ww.message)) for ww in w]
    expected_warnings = zip(trigger_warning, lowpass_warning, highpass_warning)
    assert_true(all((any([trg, lp, hp]) for (trg, lp, hp) in expected_warnings)))
    assert_equal(raw.info['highpass'], 0.0)
    assert_equal(raw.info['lowpass'], 500.0)
[ "def", "test_brainvision_data_partially_disabled_hw_filters", "(", ")", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "w", ":", "raw", "=", "_test_raw_reader", "(", "read_raw_brainvision", ",", "vhdr_fname", "=", "vhdr_partia...
test heterogeneous filter settings including non-numeric values .
train
false
17,563
def soft_break(value, length, process=(lambda chunk: chunk)):
    delimiters = re.compile('([{}]+)'.format(''.join(map(re.escape, ',.$:/+@!?()<>[]{}'))))
    def soft_break_delimiter(match):
        results = []
        value = match.group(0)
        chunks = delimiters.split(value)
        for (i, chunk) in enumerate(chunks):
            if ((i % 2) == 1):
                results.extend([chunk, u'\u200b'])
            else:
                results.append(process(chunk))
        return u''.join(results).rstrip(u'\u200b')
    return re.sub('\\S{{{},}}'.format(length), soft_break_delimiter, value)
[ "def", "soft_break", "(", "value", ",", "length", ",", "process", "=", "(", "lambda", "chunk", ":", "chunk", ")", ")", ":", "delimiters", "=", "re", ".", "compile", "(", "'([{}]+)'", ".", "format", "(", "''", ".", "join", "(", "map", "(", "re", "."...
encourages soft breaking of text values above a maximum length by adding zero-width spaces after common delimeters .
train
false
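A usage sketch, assuming the soft_break definition above is in scope: long unbreakable tokens such as URLs get a zero-width space after each run of delimiters, which is invisible but lets renderers wrap the line.

long_token = 'https://example.com/very/long/path'
broken = soft_break(long_token, 10)
assert '\u200b' in broken                           # break opportunities were added
assert broken.replace('\u200b', '') == long_token   # visible text is unchanged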
17,564
def put_tagging(Bucket, region=None, key=None, keyid=None, profile=None, **kwargs):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        tagslist = []
        for (k, v) in six.iteritems(kwargs):
            if str(k).startswith('__'):
                continue
            tagslist.append({'Key': str(k), 'Value': str(v)})
        conn.put_bucket_tagging(Bucket=Bucket, Tagging={'TagSet': tagslist})
        return {'updated': True, 'name': Bucket}
    except ClientError as e:
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "put_tagging", "(", "Bucket", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "**", "kwargs", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ","...
given a valid config .
train
true
17,566
def create_gzipped_copy(in_path, out_path, command=None):
    if command:
        subprocess.check_call(shlex.split(command.format(filename=in_path)))
    else:
        with gzip.GzipFile(out_path, 'wb+') as outf:
            with open(in_path, 'rb') as inf:
                outf.write(inf.read())
[ "def", "create_gzipped_copy", "(", "in_path", ",", "out_path", ",", "command", "=", "None", ")", ":", "if", "command", ":", "subprocess", ".", "check_call", "(", "shlex", ".", "split", "(", "command", ".", "format", "(", "filename", "=", "in_path", ")", ...
create gzipped copy of in_path and save it as out_path .
train
false
17,567
def get_variables_with_name(name, train_only=True, printable=False):
    print (' Get variables with %s' % name)
    t_vars = (tf.trainable_variables() if train_only else tf.all_variables())
    d_vars = [var for var in t_vars if (name in var.name)]
    if printable:
        for (idx, v) in enumerate(d_vars):
            print ' got {:3}: {:15} {}'.format(idx, v.name, str(v.get_shape()))
    return d_vars
[ "def", "get_variables_with_name", "(", "name", ",", "train_only", "=", "True", ",", "printable", "=", "False", ")", ":", "print", "(", "' Get variables with %s'", "%", "name", ")", "t_vars", "=", "(", "tf", ".", "trainable_variables", "(", ")", "if", "train...
get variable list by a given name scope .
train
false
17,568
def p_postfix_expression_8(t): pass
[ "def", "p_postfix_expression_8", "(", "t", ")", ":", "pass" ]
postfix_expression : postfix_expression minusminus .
train
false
17,569
def get_custom_fields(data):
    data_copy = deepcopy(data)
    field_names = {}
    for (key, value) in data.iteritems():
        if (key in DEFAULT_FIELD_NAMES):
            field_names[key] = data_copy.pop(key)
    return (field_names, data_copy)
[ "def", "get_custom_fields", "(", "data", ")", ":", "data_copy", "=", "deepcopy", "(", "data", ")", "field_names", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "data", ".", "iteritems", "(", ")", ":", "if", "(", "key", "in", "DEFAULT_FIE...
return two dicts .
train
false
17,570
def _match_mismatch_counter(s1, p1, s2, p2):
    if (s1[p1] == s2[p2]):
        cmp_fn = operator.eq
        match = True
    else:
        cmp_fn = operator.ne
        match = False
    count = 1
    p1 += 1
    p2 += 1
    while ((p1 < len(s1)) and (p2 < len(s2)) and cmp_fn(s1[p1], s2[p2])):
        count += 1
        p1 += 1
        p2 += 1
    return (match, count)
[ "def", "_match_mismatch_counter", "(", "s1", ",", "p1", ",", "s2", ",", "p2", ")", ":", "if", "(", "s1", "[", "p1", "]", "==", "s2", "[", "p2", "]", ")", ":", "cmp_fn", "=", "operator", ".", "eq", "match", "=", "True", "else", ":", "cmp_fn", "=...
count consecutive matches/mismatches between strings s1 and s2 starting at p1 and p2 .
train
false
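A usage sketch, assuming the _match_mismatch_counter definition above is in scope (it also needs the operator module imported): the first element says whether the run is a match run, the second how long it is.

# 'abXYc' vs 'abQQc' starting at 0: 'a', 'b' match, then 'X' != 'Q' ends the run.
assert _match_mismatch_counter('abXYc', 0, 'abQQc', 0) == (True, 2)
# Starting at the first mismatch, it counts the mismatch run instead.
assert _match_mismatch_counter('abXYc', 2, 'abQQc', 2) == (False, 2)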
17,572
def _setupTempDirectory(filename):
    tmpDir = tempfile.mkdtemp()
    tmpFileName = os.path.join(tmpDir, os.path.basename(filename))
    return (tmpDir, tmpFileName)
[ "def", "_setupTempDirectory", "(", "filename", ")", ":", "tmpDir", "=", "tempfile", ".", "mkdtemp", "(", ")", "tmpFileName", "=", "os", ".", "path", ".", "join", "(", "tmpDir", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "return...
create a temp directory .
train
false
17,574
def LocateFileName(fileNamesString, searchPaths):
    import regutil, string, os
    fileNames = string.split(fileNamesString, ';')
    for path in searchPaths:
        for fileName in fileNames:
            try:
                retPath = os.path.join(path, fileName)
                os.stat(retPath)
                break
            except os.error:
                retPath = None
        if retPath:
            break
    else:
        fileName = fileNames[0]
        try:
            import win32ui, win32con
        except ImportError:
            raise error, ('Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file.' % fileName)
        flags = win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(fileName)[1]
        filter = ('Files of requested type (*%s)|*%s||' % (ext, ext))
        dlg = win32ui.CreateFileDialog(1, None, fileName, flags, filter, None)
        dlg.SetOFNTitle(('Locate ' + fileName))
        if (dlg.DoModal() != win32con.IDOK):
            raise KeyboardInterrupt, 'User cancelled the process'
        retPath = dlg.GetPathName()
    return os.path.abspath(retPath)
[ "def", "LocateFileName", "(", "fileNamesString", ",", "searchPaths", ")", ":", "import", "regutil", ",", "string", ",", "os", "fileNames", "=", "string", ".", "split", "(", "fileNamesString", ",", "';'", ")", "for", "path", "in", "searchPaths", ":", "for", ...
locate a file name .
train
false
17,576
def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms):
    cmd = SON([('getMore', cursor_id), ('collection', coll)])
    if batch_size:
        cmd['batchSize'] = batch_size
    if (max_await_time_ms is not None):
        cmd['maxTimeMS'] = max_await_time_ms
    return cmd
[ "def", "_gen_get_more_command", "(", "cursor_id", ",", "coll", ",", "batch_size", ",", "max_await_time_ms", ")", ":", "cmd", "=", "SON", "(", "[", "(", "'getMore'", ",", "cursor_id", ")", ",", "(", "'collection'", ",", "coll", ")", "]", ")", "if", "batch...
generate a getmore command document .
train
true
17,577
def library_keyword_tags_with_documentation(): pass
[ "def", "library_keyword_tags_with_documentation", "(", ")", ":", "pass" ]
summary line tags: are read only from the last line tags: one .
train
false
17,578
def register_plugin_class(base_class, file_path, class_name):
    plugin_dir = os.path.dirname(os.path.realpath(file_path))
    _register_plugin_path(plugin_dir)
    module_name = _get_plugin_module(file_path)
    if (module_name is None):
        return None
    module = imp.load_source(module_name, file_path)
    klass = getattr(module, class_name, None)
    if (not klass):
        raise Exception(('Plugin file "%s" doesn\'t expose class named "%s"' % (file_path, class_name)))
    _register_plugin(base_class, klass)
    return klass
[ "def", "register_plugin_class", "(", "base_class", ",", "file_path", ",", "class_name", ")", ":", "plugin_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "file_path", ")", ")", "_register_plugin_path", "(", "plugin...
retrieve a register plugin class from the provided file .
train
false
17,579
@requires_version('scipy', '0.16')
def test_estimate_ringing():
    for kind in ('ba', 'sos'):
        for (thresh, lims) in ((0.1, (30, 60)), (0.01, (300, 600)), (0.001, (3000, 6000)), (0.0001, (30000, 60000))):
            n_ring = estimate_ringing_samples(butter(3, thresh, output=kind))
            assert_true((lims[0] <= n_ring <= lims[1]), msg=('%s %s: %s <= %s <= %s' % (kind, thresh, lims[0], n_ring, lims[1])))
    with warnings.catch_warnings(record=True) as w:
        assert_equal(estimate_ringing_samples(butter(4, 1e-05)), 100000)
    assert_true(any((('properly estimate' in str(ww.message)) for ww in w)))
[ "@", "requires_version", "(", "'scipy'", ",", "'0.16'", ")", "def", "test_estimate_ringing", "(", ")", ":", "for", "kind", "in", "(", "'ba'", ",", "'sos'", ")", ":", "for", "(", "thresh", ",", "lims", ")", "in", "(", "(", "0.1", ",", "(", "30", ","...
test our ringing estimation function .
train
false
17,581
@pytest.fixture
def fake_statusbar(qtbot):
    container = QWidget()
    qtbot.add_widget(container)
    vbox = QVBoxLayout(container)
    vbox.addStretch()
    statusbar = FakeStatusBar(container)
    statusbar.container = container
    vbox.addWidget(statusbar)
    with qtbot.waitExposed(container):
        container.show()
    return statusbar
[ "@", "pytest", ".", "fixture", "def", "fake_statusbar", "(", "qtbot", ")", ":", "container", "=", "QWidget", "(", ")", "qtbot", ".", "add_widget", "(", "container", ")", "vbox", "=", "QVBoxLayout", "(", "container", ")", "vbox", ".", "addStretch", "(", "...
fixture providing a statusbar in a container window .
train
false
17,582
def frag_remover(ack, load):
    global pkt_frag_loads
    while (len(pkt_frag_loads) > 50):
        pkt_frag_loads.popitem(last=False)
    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
    for ip_port in copy_pkt_frag_loads:
        if (len(copy_pkt_frag_loads[ip_port]) > 0):
            while (len(copy_pkt_frag_loads[ip_port]) > 25):
                pkt_frag_loads[ip_port].popitem(last=False)
    copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
    for ip_port in copy_pkt_frag_loads:
        for ack in copy_pkt_frag_loads[ip_port]:
            if (len(copy_pkt_frag_loads[ip_port][ack]) > 5000):
                pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][(-200):]
[ "def", "frag_remover", "(", "ack", ",", "load", ")", ":", "global", "pkt_frag_loads", "while", "(", "len", "(", "pkt_frag_loads", ")", ">", "50", ")", ":", "pkt_frag_loads", ".", "popitem", "(", "last", "=", "False", ")", "copy_pkt_frag_loads", "=", "copy"...
keep the filo ordereddict of frag loads from getting too large 3 points of limit: number of ip_ports < 50 number of acks per ip:port < 25 number of chars in load < 5000 .
train
false
17,586
def edgeNormals(v, e): return mapEdges((lambda a, (p, n): getNormal(a, p, n)), v, e)
[ "def", "edgeNormals", "(", "v", ",", "e", ")", ":", "return", "mapEdges", "(", "(", "lambda", "a", ",", "(", "p", ",", "n", ")", ":", "getNormal", "(", "a", ",", "p", ",", "n", ")", ")", ",", "v", ",", "e", ")" ]
assumes a mesh where each vertex has exactly least two edges .
train
false
17,587
def get_github_email(access_token):
    request = Request(u'https://api.github.com/user/emails')
    request.timeout = 1.0
    request.add_header(u'User-Agent', USER_AGENT)
    request.add_header(u'Authorization', u'token {0}'.format(access_token))
    handle = urlopen(request)
    data = json.loads(handle.read().decode(u'utf-8'))
    email = None
    for entry in data:
        if (not entry[u'verified']):
            continue
        email = entry[u'email']
        if entry[u'primary']:
            break
    return email
[ "def", "get_github_email", "(", "access_token", ")", ":", "request", "=", "Request", "(", "u'https://api.github.com/user/emails'", ")", "request", ".", "timeout", "=", "1.0", "request", ".", "add_header", "(", "u'User-Agent'", ",", "USER_AGENT", ")", "request", "....
get real email from github .
train
false
17,588
def _check_conditions_permissions(user, permissions, course_id, content):
    def test(user, per, operator='or'):
        if isinstance(per, basestring):
            if (per in CONDITIONS):
                return _check_condition(user, per, content)
            return has_permission(user, per, course_id=course_id)
        elif (isinstance(per, list) and (operator in ['and', 'or'])):
            results = [test(user, x, operator='and') for x in per]
            if (operator == 'or'):
                return (True in results)
            elif (operator == 'and'):
                return (False not in results)
    return test(user, permissions, operator='or')
[ "def", "_check_conditions_permissions", "(", "user", ",", "permissions", ",", "course_id", ",", "content", ")", ":", "def", "test", "(", "user", ",", "per", ",", "operator", "=", "'or'", ")", ":", "if", "isinstance", "(", "per", ",", "basestring", ")", "...
accepts a list of permissions and proceed if any of the permission is valid .
train
false
17,589
def protectResource(resource, config): return DummyAuthResource(resource)
[ "def", "protectResource", "(", "resource", ",", "config", ")", ":", "return", "DummyAuthResource", "(", "resource", ")" ]
dummy resource protector .
train
false
17,590
@register.simple_tag
def reviewer_list(review_request):
    return humanize_list(([(group.display_name or group.name) for group in review_request.target_groups.all()] + [(user.get_full_name() or user.username) for user in review_request.target_people.all()]))
[ "@", "register", ".", "simple_tag", "def", "reviewer_list", "(", "review_request", ")", ":", "return", "humanize_list", "(", "(", "[", "(", "group", ".", "display_name", "or", "group", ".", "name", ")", "for", "group", "in", "review_request", ".", "target_gr...
returns a humanized list of target reviewers in a review request .
train
false
17,591
def sync_all(saltenv=None, refresh=True):
    log.debug('Syncing all')
    ret = {}
    ret['beacons'] = sync_beacons(saltenv, False)
    ret['modules'] = sync_modules(saltenv, False)
    ret['states'] = sync_states(saltenv, False)
    ret['sdb'] = sync_sdb(saltenv)
    ret['grains'] = sync_grains(saltenv, False)
    ret['renderers'] = sync_renderers(saltenv, False)
    ret['returners'] = sync_returners(saltenv, False)
    ret['output'] = sync_output(saltenv, False)
    ret['utils'] = sync_utils(saltenv, False)
    ret['log_handlers'] = sync_log_handlers(saltenv, False)
    ret['proxymodules'] = sync_proxymodules(saltenv, False)
    ret['engines'] = sync_engines(saltenv, False)
    if (__opts__['file_client'] == 'local'):
        ret['pillar'] = sync_pillar(saltenv, False)
    if refresh:
        refresh_modules()
        refresh_pillar()
    return ret
[ "def", "sync_all", "(", "saltenv", "=", "None", ",", "refresh", "=", "True", ")", ":", "log", ".", "debug", "(", "'Syncing all'", ")", "ret", "=", "{", "}", "ret", "[", "'beacons'", "]", "=", "sync_beacons", "(", "saltenv", ",", "False", ")", "ret", ...
sync all custom types saltenv : base the fileserver environment from which to sync .
train
false
17,593
def additionalAssignment(MobileAllocation_presence=0, StartingTime_presence=0):
    a = TpPd(pd=6)
    b = MessageType(mesType=59)
    c = ChannelDescription()
    packet = ((a / b) / c)
    if (MobileAllocation_presence is 1):
        d = MobileAllocationHdr(ieiMA=114, eightBitMA=0)
        packet = (packet / d)
    if (StartingTime_presence is 1):
        e = StartingTimeHdr(ieiST=124, eightBitST=0)
        packet = (packet / e)
    return packet
[ "def", "additionalAssignment", "(", "MobileAllocation_presence", "=", "0", ",", "StartingTime_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "6", ")", "b", "=", "MessageType", "(", "mesType", "=", "59", ")", "c", "=", "ChannelDescription"...
additional assignment section 9 .
train
true
17,594
def parse_s3_url(url):
    bucket = ''
    path = ''
    if url:
        result = urlparse.urlparse(url)
        bucket = result.netloc
        path = result.path.strip('/')
    return (bucket, path)
[ "def", "parse_s3_url", "(", "url", ")", ":", "bucket", "=", "''", "path", "=", "''", "if", "url", ":", "result", "=", "urlparse", ".", "urlparse", "(", "url", ")", "bucket", "=", "result", ".", "netloc", "path", "=", "result", ".", "path", ".", "st...
parses s3 url .
train
true
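The snippet above uses the Python 2 urlparse module. A Python 3 sketch that is runnable as-is:

from urllib.parse import urlparse  # Python 3 home of the urlparse function

def parse_s3_url(url):
    bucket, path = '', ''
    if url:
        result = urlparse(url)
        bucket = result.netloc         # 's3://bucket/key' -> 'bucket'
        path = result.path.strip('/')  # -> 'key'
    return bucket, path

assert parse_s3_url('s3://my-bucket/logs/2017/01.gz') == ('my-bucket', 'logs/2017/01.gz')
assert parse_s3_url('') == ('', '')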
17,599
def scans_for_fnames(fnames, keep4d=False, separate_sessions=False):
    flist = None
    if (not isinstance(fnames[0], list)):
        if func_is_3d(fnames[0]):
            fnames = [fnames]
    if (separate_sessions or keep4d):
        flist = np.zeros((len(fnames),), dtype=object)
    for (i, f) in enumerate(fnames):
        if separate_sessions:
            if keep4d:
                if isinstance(f, list):
                    flist[i] = np.array(f, dtype=object)
                else:
                    flist[i] = np.array([f], dtype=object)
            else:
                flist[i] = scans_for_fname(f)
        elif keep4d:
            flist[i] = f
        else:
            scans = scans_for_fname(f)
            if (flist is None):
                flist = scans
            else:
                flist = np.concatenate((flist, scans))
    return flist
[ "def", "scans_for_fnames", "(", "fnames", ",", "keep4d", "=", "False", ",", "separate_sessions", "=", "False", ")", ":", "flist", "=", "None", "if", "(", "not", "isinstance", "(", "fnames", "[", "0", "]", ",", "list", ")", ")", ":", "if", "func_is_3d",...
converts a list of files to a concatenated numpy array for each volume .
train
false
17,600
@staff_member_required
def admin_keywords_submit(request):
    (keyword_ids, titles) = ([], [])
    remove = punctuation.replace(u'-', u'')
    for title in request.POST.get(u'text_keywords', u'').split(u','):
        title = u''.join([c for c in title if (c not in remove)]).strip()
        if title:
            (kw, created) = Keyword.objects.get_or_create_iexact(title=title)
            keyword_id = str(kw.id)
            if (keyword_id not in keyword_ids):
                keyword_ids.append(keyword_id)
                titles.append(title)
    return HttpResponse((u'%s|%s' % (u','.join(keyword_ids), u', '.join(titles))), content_type=u'text/plain')
[ "@", "staff_member_required", "def", "admin_keywords_submit", "(", "request", ")", ":", "(", "keyword_ids", ",", "titles", ")", "=", "(", "[", "]", ",", "[", "]", ")", "remove", "=", "punctuation", ".", "replace", "(", "u'-'", ",", "u''", ")", "for", "...
adds any new given keywords from the custom keywords field in the admin .
train
false
17,604
def get_theano_hamiltonian_functions(model_vars, shared, logpt, potential, use_single_leapfrog=False, **theano_kwargs):
    (H, q) = _theano_hamiltonian(model_vars, shared, logpt, potential)
    (energy_function, p) = _theano_energy_function(H, q, **theano_kwargs)
    if use_single_leapfrog:
        leapfrog_integrator = _theano_single_leapfrog(H, q, p, **theano_kwargs)
    else:
        leapfrog_integrator = _theano_leapfrog_integrator(H, q, p, **theano_kwargs)
    return (H, energy_function, leapfrog_integrator, {'q': q, 'p': p})
[ "def", "get_theano_hamiltonian_functions", "(", "model_vars", ",", "shared", ",", "logpt", ",", "potential", ",", "use_single_leapfrog", "=", "False", ",", "**", "theano_kwargs", ")", ":", "(", "H", ",", "q", ")", "=", "_theano_hamiltonian", "(", "model_vars", ...
construct theano functions for the hamiltonian .
train
false
17,605
def appendXmlTextNode(tag_name, text, parent):
    el = xmlTextNode(tag_name, text)
    parent.append(el)
    return el
[ "def", "appendXmlTextNode", "(", "tag_name", ",", "text", ",", "parent", ")", ":", "el", "=", "xmlTextNode", "(", "tag_name", ",", "text", ")", "parent", ".", "append", "(", "el", ")", "return", "el" ]
creates a new <tag_name> node and sets its content to text .
train
false
17,606
def download_device(headers, cookies, temp_target_disk, device_url, lease_updater, total_bytes_written, total_bytes_to_write):
    with open(temp_target_disk, 'wb') as handle:
        response = requests.get(device_url, stream=True, headers=headers, cookies=cookies, verify=False)
        if (not response.ok):
            response.raise_for_status()
        current_bytes_written = 0
        for block in response.iter_content(chunk_size=2048):
            if block:
                handle.write(block)
                handle.flush()
                os.fsync(handle.fileno())
                current_bytes_written += len(block)
                written_pct = (((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write)
                lease_updater.progressPercent = int(written_pct)
    return current_bytes_written
[ "def", "download_device", "(", "headers", ",", "cookies", ",", "temp_target_disk", ",", "device_url", ",", "lease_updater", ",", "total_bytes_written", ",", "total_bytes_to_write", ")", ":", "with", "open", "(", "temp_target_disk", ",", "'wb'", ")", "as", "handle"...
download disk device of httpnfclease .
train
true
17,607
def create_benefit(benefit_class, **kwargs):
    if (benefit_class.description is Benefit.description):
        raise RuntimeError("Your custom benefit must implement its own 'description' property")
    return Benefit.objects.create(proxy_class=_class_path(benefit_class), **kwargs)
[ "def", "create_benefit", "(", "benefit_class", ",", "**", "kwargs", ")", ":", "if", "(", "benefit_class", ".", "description", "is", "Benefit", ".", "description", ")", ":", "raise", "RuntimeError", "(", "\"Your custom benefit must implement its own 'description' propert...
create a custom benefit instance .
train
false
17,609
def is_key_line(key):
    if (not key):
        return False
    if (key[0] == u'#'):
        return False
    if (key[0] == u'@'):
        return False
    return ((u' ssh-rsa ' in key) or (u' ecdsa-sha2-nistp256 ' in key) or (u' ssh-ed25519 ' in key))
[ "def", "is_key_line", "(", "key", ")", ":", "if", "(", "not", "key", ")", ":", "return", "False", "if", "(", "key", "[", "0", "]", "==", "u'#'", ")", ":", "return", "False", "if", "(", "key", "[", "0", "]", "==", "u'@'", ")", ":", "return", "...
checks whether this line looks like a valid known_hosts line .
train
false
17,610
def _float_or_none(x, digits=3):
    if (x is None):
        return str(x)
    fmtstr = u'{0:.{digits}g}'.format(x, digits=digits)
    return fmtstr.format(x)
[ "def", "_float_or_none", "(", "x", ",", "digits", "=", "3", ")", ":", "if", "(", "x", "is", "None", ")", ":", "return", "str", "(", "x", ")", "fmtstr", "=", "u'{0:.{digits}g}'", ".", "format", "(", "x", ",", "digits", "=", "digits", ")", "return", ...
helper function to format a variable that can be a float or none .
train
false
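A usage sketch, assuming the _float_or_none definition above is in scope. Note that the first .format already produces the final string, so the trailing fmtstr.format(x) is a no-op on a braceless string; the behavior shown here is the net effect.

assert _float_or_none(None) == 'None'                    # None passes through as text
assert _float_or_none(3.14159) == u'3.14'                # 3 significant digits by default
assert _float_or_none(0.000123456, digits=2) == u'0.00012'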
17,611
def deflood(s, n=3):
    if (n == 0):
        return s[0:0]
    return re.sub(('((.)\\2{%s,})' % (n - 1)), (lambda m: (m.group(1)[0] * n)), s)
[ "def", "deflood", "(", "s", ",", "n", "=", "3", ")", ":", "if", "(", "n", "==", "0", ")", ":", "return", "s", "[", "0", ":", "0", "]", "return", "re", ".", "sub", "(", "(", "'((.)\\\\2{%s,})'", "%", "(", "n", "-", "1", ")", ")", ",", "(",...
returns the string with no more than n repeated characters .
train
true
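A usage sketch, assuming the deflood definition above is in scope (it needs re imported): runs of n or more identical characters are clipped to exactly n.

assert deflood('coooool!!!!!!') == 'coool!!!'   # runs clipped to 3 by default
assert deflood('abc') == 'abc'                  # short runs untouched
assert deflood('aaaa', n=0) == ''               # n=0 returns an empty string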
17,612
def do_center(value, width=80): return text_type(value).center(width)
[ "def", "do_center", "(", "value", ",", "width", "=", "80", ")", ":", "return", "text_type", "(", "value", ")", ".", "center", "(", "width", ")" ]
centers the value in a field of a given width .
train
false
17,613
def create_candlestick(open, high, low, close, dates=None, direction='both', **kwargs):
    if (dates is not None):
        utils.validate_equal_length(open, high, low, close, dates)
    else:
        utils.validate_equal_length(open, high, low, close)
    validate_ohlc(open, high, low, close, direction, **kwargs)
    if (direction is 'increasing'):
        candle_incr_data = make_increasing_candle(open, high, low, close, dates, **kwargs)
        data = candle_incr_data
    elif (direction is 'decreasing'):
        candle_decr_data = make_decreasing_candle(open, high, low, close, dates, **kwargs)
        data = candle_decr_data
    else:
        candle_incr_data = make_increasing_candle(open, high, low, close, dates, **kwargs)
        candle_decr_data = make_decreasing_candle(open, high, low, close, dates, **kwargs)
        data = (candle_incr_data + candle_decr_data)
    layout = graph_objs.Layout()
    return graph_objs.Figure(data=data, layout=layout)
[ "def", "create_candlestick", "(", "open", ",", "high", ",", "low", ",", "close", ",", "dates", "=", "None", ",", "direction", "=", "'both'", ",", "**", "kwargs", ")", ":", "if", "(", "dates", "is", "not", "None", ")", ":", "utils", ".", "validate_equ...
beta function that creates a candlestick chart .
train
false
17,614
def _realpath(path):
    if (sys.platform == 'cygwin'):
        if (path[1:3] == ':\\'):
            return path
        elif (path[1:3] == ':/'):
            path = (('/cygdrive/' + path[0]) + path[2:])
        return os.path.abspath(os.path.expanduser(path))
    return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
[ "def", "_realpath", "(", "path", ")", ":", "if", "(", "sys", ".", "platform", "==", "'cygwin'", ")", ":", "if", "(", "path", "[", "1", ":", "3", "]", "==", "':\\\\'", ")", ":", "return", "path", "elif", "(", "path", "[", "1", ":", "3", "]", "...
return the real path of path is equivalent to realpath(abspath(expanduser)) .
train
true
17,615
def get_tests_stanza(tests, is_server, prepend=None, append=None, client_control_file=''):
    assert (not (client_control_file and is_server))
    if (not prepend):
        prepend = []
    if (not append):
        append = []
    raw_control_files = [read_control_file(test) for test in tests]
    return _get_tests_stanza(raw_control_files, is_server, prepend, append, client_control_file=client_control_file)
[ "def", "get_tests_stanza", "(", "tests", ",", "is_server", ",", "prepend", "=", "None", ",", "append", "=", "None", ",", "client_control_file", "=", "''", ")", ":", "assert", "(", "not", "(", "client_control_file", "and", "is_server", ")", ")", "if", "(", ...
constructs the control file test step code from a list of tests .
train
false
17,616
def serialize_unregistered(fullname, email):
    user = auth.get_user(email=email)
    if (user is None):
        serialized = {'fullname': fullname, 'id': None, 'registered': False, 'active': False, 'gravatar': gravatar(email, use_ssl=True, size=settings.PROFILE_IMAGE_MEDIUM), 'email': email}
    else:
        serialized = add_contributor_json(user)
        serialized['fullname'] = fullname
        serialized['email'] = email
    return serialized
[ "def", "serialize_unregistered", "(", "fullname", ",", "email", ")", ":", "user", "=", "auth", ".", "get_user", "(", "email", "=", "email", ")", "if", "(", "user", "is", "None", ")", ":", "serialized", "=", "{", "'fullname'", ":", "fullname", ",", "'id...
serializes an unregistered user .
train
false
17,617
def looks_like_a_cwl_artifact(path, classes=None):
    if (not _has_extension(path, CWL_EXTENSIONS)):
        return False
    with open(path, 'r') as f:
        try:
            as_dict = yaml.safe_load(f)
        except Exception:
            return False
    if (not isinstance(as_dict, dict)):
        return False
    file_class = as_dict.get('class', None)
    if ((classes is not None) and (file_class not in classes)):
        return False
    file_cwl_version = as_dict.get('cwlVersion', None)
    return (file_cwl_version is not None)
[ "def", "looks_like_a_cwl_artifact", "(", "path", ",", "classes", "=", "None", ")", ":", "if", "(", "not", "_has_extension", "(", "path", ",", "CWL_EXTENSIONS", ")", ")", ":", "return", "False", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", "...
quick check to see if a file looks like it may be a cwl artifact .
train
false
17,618
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
    proto_field_name = field.name
    property_name = _PropertyName(proto_field_name)
    message_type = field.message_type
    def getter(self):
        field_value = self._fields.get(field)
        if (field_value is None):
            field_value = message_type._concrete_class()
            field_value._SetListener(self._listener_for_children)
            field_value = self._fields.setdefault(field, field_value)
        return field_value
    getter.__module__ = None
    getter.__doc__ = ('Getter for %s.' % proto_field_name)
    def setter(self, new_value):
        raise AttributeError(('Assignment not allowed to composite field "%s" in protocol message object.' % proto_field_name))
    doc = ('Magic attribute generated for "%s" proto field.' % proto_field_name)
    setattr(cls, property_name, property(getter, setter, doc=doc))
[ "def", "_AddPropertiesForNonRepeatedCompositeField", "(", "field", ",", "cls", ")", ":", "proto_field_name", "=", "field", ".", "name", "property_name", "=", "_PropertyName", "(", "proto_field_name", ")", "message_type", "=", "field", ".", "message_type", "def", "ge...
adds a public property for a nonrepeated .
train
false
17,620
def _get_level(value, levels, prefix=None):
    if ((value > 1) or (value < 0)):
        raise ValueError(('Encountered invalid normalized alpha diversity value %s. Normalized values must be between 0 and 1.' % value))
    check = [i for i in range(0, len(levels)) if (levels[i] == value)]
    if len(check):
        value_level = (check[0] + 2)
    else:
        value_level = (searchsorted(levels, value) + 1)
    if (prefix is not None):
        output = '{0}_{1}_of_{2}'.format(prefix, value_level, (len(levels) + 1))
    else:
        output = value_level
    return output
[ "def", "_get_level", "(", "value", ",", "levels", ",", "prefix", "=", "None", ")", ":", "if", "(", "(", "value", ">", "1", ")", "or", "(", "value", "<", "0", ")", ")", ":", "raise", "ValueError", "(", "(", "'Encountered invalid normalized alpha diversity...
accommodate a value into the levels list; return a string or an integer input: value: normalized value to assign a level to .
train
false
17,621
def _prevent_segment_delete_with_port_bound(resource, event, trigger, context, segment):
    segment_id = segment['id']
    query = context.session.query(models_v2.Port)
    query = query.join(models.PortBindingLevel, (models.PortBindingLevel.port_id == models_v2.Port.id))
    query = query.filter((models.PortBindingLevel.segment_id == segment_id))
    port_ids = [p.id for p in query]
    if port_ids:
        reason = (_('The segment is still bound with port(s) %s') % ', '.join(port_ids))
        raise seg_exc.SegmentInUse(segment_id=segment_id, reason=reason)
[ "def", "_prevent_segment_delete_with_port_bound", "(", "resource", ",", "event", ",", "trigger", ",", "context", ",", "segment", ")", ":", "segment_id", "=", "segment", "[", "'id'", "]", "query", "=", "context", ".", "session", ".", "query", "(", "models_v2", ...
raise exception if there are any ports bound with segment_id .
train
false
17,622
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
    _ = item.runtime.service(item, 'i18n').ugettext
    if (subs_type.lower() != 'srt'):
        raise TranscriptsGenerationException(_('We support only SubRip (*.srt) transcripts format.'))
    try:
        srt_subs_obj = SubRipFile.from_string(subs_filedata)
    except Exception as ex:
        msg = _('Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}').format(error_message=ex.message)
        raise TranscriptsGenerationException(msg)
    if (not srt_subs_obj):
        raise TranscriptsGenerationException(_('Something wrong with SubRip transcripts file during parsing.'))
    sub_starts = []
    sub_ends = []
    sub_texts = []
    for sub in srt_subs_obj:
        sub_starts.append(sub.start.ordinal)
        sub_ends.append(sub.end.ordinal)
        sub_texts.append(sub.text.replace('\n', ' '))
    subs = {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
    for (speed, subs_id) in speed_subs.iteritems():
        save_subs_to_store(generate_subs(speed, 1, subs), subs_id, item, language)
    return subs
[ "def", "generate_subs_from_source", "(", "speed_subs", ",", "subs_type", ",", "subs_filedata", ",", "item", ",", "language", "=", "'en'", ")", ":", "_", "=", "item", ".", "runtime", ".", "service", "(", "item", ",", "'i18n'", ")", ".", "ugettext", "if", ...
generate transcripts from source files and save them to assets for item module .
train
false
17,623
def get_perm(Model, perm):
    ct = ContentType.objects.get_for_model(Model)
    return Permission.objects.get(content_type=ct, codename=perm)
[ "def", "get_perm", "(", "Model", ",", "perm", ")", ":", "ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Model", ")", "return", "Permission", ".", "objects", ".", "get", "(", "content_type", "=", "ct", ",", "codename", "=", "perm", ...
return the permission object .
train
false
17,624
def _degree_bound_univariate(f, g):
    gamma = f.ring.domain.gcd(f.LC, g.LC)
    p = 1
    p = nextprime(p)
    while ((gamma % p) == 0):
        p = nextprime(p)
    fp = f.trunc_ground(p)
    gp = g.trunc_ground(p)
    hp = _gf_gcd(fp, gp, p)
    deghp = hp.degree()
    return deghp
[ "def", "_degree_bound_univariate", "(", "f", ",", "g", ")", ":", "gamma", "=", "f", ".", "ring", ".", "domain", ".", "gcd", "(", "f", ".", "LC", ",", "g", ".", "LC", ")", "p", "=", "1", "p", "=", "nextprime", "(", "p", ")", "while", "(", "(",...
compute an upper bound for the degree of the gcd of two univariate integer polynomials f and g .
train
false
17,626
@receiver(score_reset)
def submissions_score_reset_handler(sender, **kwargs):
    course_id = kwargs['course_id']
    usage_id = kwargs['item_id']
    user = user_by_anonymous_id(kwargs['anonymous_user_id'])
    if (user is None):
        return
    PROBLEM_WEIGHTED_SCORE_CHANGED.send(sender=None, weighted_earned=0, weighted_possible=0, user_id=user.id, anonymous_user_id=kwargs['anonymous_user_id'], course_id=course_id, usage_id=usage_id, modified=kwargs['created_at'], score_db_table=ScoreDatabaseTableEnum.submissions)
[ "@", "receiver", "(", "score_reset", ")", "def", "submissions_score_reset_handler", "(", "sender", ",", "**", "kwargs", ")", ":", "course_id", "=", "kwargs", "[", "'course_id'", "]", "usage_id", "=", "kwargs", "[", "'item_id'", "]", "user", "=", "user_by_anony...
consume the score_reset signal defined in the submissions api .
train
false
17,627
@profiler.trace
def policy_create(request, **kwargs):
    body = {'firewall_policy': kwargs}
    policy = neutronclient(request).create_firewall_policy(body).get('firewall_policy')
    return Policy(policy)
[ "@", "profiler", ".", "trace", "def", "policy_create", "(", "request", ",", "**", "kwargs", ")", ":", "body", "=", "{", "'firewall_policy'", ":", "kwargs", "}", "policy", "=", "neutronclient", "(", "request", ")", ".", "create_firewall_policy", "(", "body", ...
create a firewall policy .
train
false
17,629
def helmert(n, full=False):
    H = (np.tril(np.ones((n, n)), (-1)) - np.diag(np.arange(n)))
    d = (np.arange(n) * np.arange(1, (n + 1)))
    H[0] = 1
    d[0] = n
    H_full = (H / np.sqrt(d)[:, np.newaxis])
    if full:
        return H_full
    else:
        return H_full[1:]
[ "def", "helmert", "(", "n", ",", "full", "=", "False", ")", ":", "H", "=", "(", "np", ".", "tril", "(", "np", ".", "ones", "(", "(", "n", ",", "n", ")", ")", ",", "(", "-", "1", ")", ")", "-", "np", ".", "diag", "(", "np", ".", "arange"...
create a helmert matrix of order n .
train
false
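A usage sketch, assuming the helmert definition above is in scope along with numpy as np: the full matrix is orthogonal, and the default (contrast) form has zero-sum rows.

import numpy as np

H = helmert(3, full=True)
assert np.allclose(H.dot(H.T), np.eye(3))        # full Helmert matrix is orthogonal
assert np.allclose(helmert(3).sum(axis=1), 0)    # contrast rows each sum to zero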
17,630
def FillUsersInQuery(filters):
    for filter in filters:
        for property in filter.property_list():
            FillUser(property)
[ "def", "FillUsersInQuery", "(", "filters", ")", ":", "for", "filter", "in", "filters", ":", "for", "property", "in", "filter", ".", "property_list", "(", ")", ":", "FillUser", "(", "property", ")" ]
fill in a synthetic user id for all user properties in a set of filters .
train
false
17,634
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.'))
@utils.arg('name', metavar='<name>', help=_('Name of the backup image.'))
@utils.arg('backup_type', metavar='<backup-type>', help=_('The backup type, like "daily" or "weekly".'))
@utils.arg('rotation', metavar='<rotation>', help=_('Int parameter representing how many backups to keep around.'))
def do_backup(cs, args):
    _find_server(cs, args.server).backup(args.name, args.backup_type, args.rotation)
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "@", "utils", ".", "arg", "(", "'name'", ",", "metavar", "=", "'<name>'", ",", "help", "=", "_", "(", "'Nam...
backup a server by creating a backup type snapshot .
train
false
17,636
def extended_gcd(p, q):
    (a, b) = (p, q)
    if (a < 0):
        a = ((-1) * a)
    if (b < 0):
        b = ((-1) * b)
    x0 = 0
    y1 = 0
    x1 = 1
    y0 = 1
    while (b != 0):
        quotient = (a // b)
        (a, b) = (b, (a % b))
        (x1, x0) = ((x0 - (quotient * x1)), x1)
        (y1, y0) = ((y0 - (quotient * y1)), y1)
    if (p < 0):
        y0 = ((-1) * y0)
    if (q < 0):
        x0 = ((-1) * x0)
    return (y0, x0)
[ "def", "extended_gcd", "(", "p", ",", "q", ")", ":", "(", "a", ",", "b", ")", "=", "(", "p", ",", "q", ")", "if", "(", "a", "<", "0", ")", ":", "a", "=", "(", "(", "-", "1", ")", "*", "a", ")", "if", "(", "b", "<", "0", ")", ":", ...
returns a tuple such that r = gcd = ia + jb .
train
false
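A usage sketch, assuming the extended_gcd definition above is in scope: the returned (i, j) are the Bezout coefficients, so i*p + j*q equals gcd(p, q).

p, q = 240, 46
i, j = extended_gcd(p, q)      # (-9, 47) for these inputs
assert i * p + j * q == 2      # gcd(240, 46) == 2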
17,637
def _gpi10iterator(handle):
    for inline in handle:
        if (inline[0] == '!'):
            continue
        inrec = inline.rstrip('\n').split(' DCTB ')
        if (len(inrec) == 1):
            continue
        inrec[5] = inrec[5].split('|')
        inrec[8] = inrec[8].split('|')
        (yield dict(zip(GPI10FIELDS, inrec)))
[ "def", "_gpi10iterator", "(", "handle", ")", ":", "for", "inline", "in", "handle", ":", "if", "(", "inline", "[", "0", "]", "==", "'!'", ")", ":", "continue", "inrec", "=", "inline", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "' DCTB '", ...
read gpi 1 .
train
false
17,638
def assert_aws_environ():
    try:
        import boto
    except ImportError as e:
        raise SkipTest(str(e))
    if ('AWS_ACCESS_KEY_ID' not in os.environ):
        raise SkipTest('AWS keys not found')
[ "def", "assert_aws_environ", "(", ")", ":", "try", ":", "import", "boto", "except", "ImportError", "as", "e", ":", "raise", "SkipTest", "(", "str", "(", "e", ")", ")", "if", "(", "'AWS_ACCESS_KEY_ID'", "not", "in", "os", ".", "environ", ")", ":", "rais...
asserts the current environment is suitable for running aws tests .
train
false
17,639
@permission_required('questions.tag_question') @require_POST def remove_tag_async(request, question_id): name = request.POST.get('name') if name: question = get_object_or_404(Question, pk=question_id) question.tags.remove(name) question.clear_cached_tags() return HttpResponse('{}', content_type='application/json') return HttpResponseBadRequest(json.dumps({'error': unicode(NO_TAG)}), content_type='application/json')
[ "@", "permission_required", "(", "'questions.tag_question'", ")", "@", "require_POST", "def", "remove_tag_async", "(", "request", ",", "question_id", ")", ":", "name", "=", "request", ".", "POST", ".", "get", "(", "'name'", ")", "if", "name", ":", "question", ...
remove a tag from question .
train
false
17,640
def _deactivate_invalidation(certificate): try: certificate_invalidation = CertificateInvalidation.objects.get(generated_certificate=certificate, active=True) certificate_invalidation.deactivate() except CertificateInvalidation.DoesNotExist: pass
[ "def", "_deactivate_invalidation", "(", "certificate", ")", ":", "try", ":", "certificate_invalidation", "=", "CertificateInvalidation", ".", "objects", ".", "get", "(", "generated_certificate", "=", "certificate", ",", "active", "=", "True", ")", "certificate_invalid...
deactivate certificate invalidation by setting active to false .
train
false
17,641
@memoize_default(None, evaluator_is_first_arg=True) def follow_param(evaluator, param): def eval_docstring(docstr): param_str = str(param.name) return set([p for string in _search_param_in_docstr(docstr, param_str) for p in _evaluate_for_statement_string(evaluator, string, module)]) func = param.parent_function module = param.get_parent_until() docstr = func.raw_doc types = eval_docstring(docstr) if (func.name.value == '__init__'): cls = func.get_parent_until(Class) if (cls.type == 'classdef'): types |= eval_docstring(cls.raw_doc) return types
[ "@", "memoize_default", "(", "None", ",", "evaluator_is_first_arg", "=", "True", ")", "def", "follow_param", "(", "evaluator", ",", "param", ")", ":", "def", "eval_docstring", "(", "docstr", ")", ":", "param_str", "=", "str", "(", "param", ".", "name", ")"...
determines a set of potential types for param using docstring hints .
train
false
17,642
def setup_logrotate(app_name, watch, log_size): app_logrotate_script = '{0}/appscale-{1}'.format(LOGROTATE_CONFIG_DIR, app_name) contents = '/var/log/appscale/{watch}*.log {{\n size {size}\n missingok\n rotate 7\n compress\n delaycompress\n notifempty\n copytruncate\n}}\n'.format(watch=watch, size=log_size) logging.debug('Logrotate file: {} - Contents:\n{}'.format(app_logrotate_script, contents)) with open(app_logrotate_script, 'w') as app_logrotate_fd: app_logrotate_fd.write(contents) return True
[ "def", "setup_logrotate", "(", "app_name", ",", "watch", ",", "log_size", ")", ":", "app_logrotate_script", "=", "'{0}/appscale-{1}'", ".", "format", "(", "LOGROTATE_CONFIG_DIR", ",", "app_name", ")", "contents", "=", "'/var/log/appscale/{watch}*.log {{\\n size {size}\\n...
creates a logrotate script for the logs that the given application will create .
train
false
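Usage sketch for the setup_logrotate record (the argument values are hypothetical, and writing under LOGROTATE_CONFIG_DIR normally requires root):

    setup_logrotate('guestbook', 'app___guestbook', '100M')
    # Writes LOGROTATE_CONFIG_DIR/appscale-guestbook, which rotates
    # /var/log/appscale/app___guestbook*.log once it exceeds 100M,
    # keeping 7 compressed rotations.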
17,643
def render_string_template(template_string, context=None, context_instance=None): if (context is None): context = {} template = Template(template_string) if (('user' not in context) and context_instance): if ('request' in context_instance): context.update({'user': context_instance['request']}) return template.render(context)
[ "def", "render_string_template", "(", "template_string", ",", "context", "=", "None", ",", "context_instance", "=", "None", ")", ":", "if", "(", "context", "is", "None", ")", ":", "context", "=", "{", "}", "template", "=", "Template", "(", "template_string",...
performs rendering using template_string instead of a file .
train
false
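A hypothetical call for the render_string_template record; whether a plain dict is accepted depends on which Template class the module imports (Jinja2 templates take dicts directly, while classic django.template.Template expects a Context):

    html = render_string_template('Hello {{ name }}!', {'name': 'world'})
    # -> 'Hello world!'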
17,644
def mul_sign(lh_expr, rh_expr): is_pos = ((lh_expr.is_zero() or rh_expr.is_zero()) or (lh_expr.is_positive() and rh_expr.is_positive()) or (lh_expr.is_negative() and rh_expr.is_negative())) is_neg = ((lh_expr.is_zero() or rh_expr.is_zero()) or (lh_expr.is_positive() and rh_expr.is_negative()) or (lh_expr.is_negative() and rh_expr.is_positive())) return (is_pos, is_neg)
[ "def", "mul_sign", "(", "lh_expr", ",", "rh_expr", ")", ":", "is_pos", "=", "(", "(", "lh_expr", ".", "is_zero", "(", ")", "or", "rh_expr", ".", "is_zero", "(", ")", ")", "or", "(", "lh_expr", ".", "is_positive", "(", ")", "and", "rh_expr", ".", "i...
give the sign resulting from multiplying two expressions .
train
false
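A toy check of mul_sign's truth table; the Expr stub below is hypothetical and merely mimics the is_zero/is_positive/is_negative interface the real expression objects expose.

    class Expr(object):
        def __init__(self, sign): self.sign = sign
        def is_zero(self): return self.sign == 0
        def is_positive(self): return self.sign > 0
        def is_negative(self): return self.sign < 0

    print(mul_sign(Expr(1), Expr(-1)))   # (False, True): product known nonpositive
    print(mul_sign(Expr(0), Expr(-1)))   # (True, True): a zero factor is both >= 0 and <= 0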
17,646
def clean_text_by_sentences(text): original_sentences = split_sentences(text) filtered_sentences = [join_words(sentence) for sentence in preprocess_documents(original_sentences)] return merge_syntactic_units(original_sentences, filtered_sentences)
[ "def", "clean_text_by_sentences", "(", "text", ")", ":", "original_sentences", "=", "split_sentences", "(", "text", ")", "filtered_sentences", "=", "[", "join_words", "(", "sentence", ")", "for", "sentence", "in", "preprocess_documents", "(", "original_sentences", "...
tokenizes a given text into sentences .
train
false
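A hedged usage sketch for the clean_text_by_sentences record, assuming gensim-style SyntacticUnit results that expose the original .text and the filtered .token:

    units = clean_text_by_sentences('Rare Hendrix song draft sells for $17,000. '
                                    'It was written in 1965.')
    for u in units:
        print(u.text, '->', u.token)   # one unit per sentence, paired with its filtered form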
17,648
def unpack_tarball(tar_filename, dest_path): if os.path.exists(tar_filename): if file_access_rights(dest_path, os.W_OK, check_above=False): try: this_tar_file = tarfile.open(tar_filename, 'r:bz2') except Exception as e: raise IOError(("[tar] cannot open '%s'" % tar_filename)) else: try: this_tar_file.extractall(dest_path) except Exception as e: raise IOError(("[tar] error while extracting '%s'" % tar_filename)) else: pass else: raise IOError(("[tar] no right access to '%s'" % dest_path)) else: raise IOError(("'%s' not found" % tar_filename))
[ "def", "unpack_tarball", "(", "tar_filename", ",", "dest_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "tar_filename", ")", ":", "if", "file_access_rights", "(", "dest_path", ",", "os", ".", "W_OK", ",", "check_above", "=", "False", ")", ...
unpacks a tarball to a destination directory .
train
false
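Straightforward usage of the unpack_tarball record; note the hard-coded 'r:bz2' mode, so the archive must be bzip2-compressed (the paths here are examples):

    unpack_tarball('/tmp/release.tar.bz2', '/opt/releases')   # raises IOError on any failure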
17,650
def get_errstring(url, base='Invalid URL'): url_error = url.errorString() if url_error: return (base + ' - {}'.format(url_error)) else: return base
[ "def", "get_errstring", "(", "url", ",", "base", "=", "'Invalid URL'", ")", ":", "url_error", "=", "url", ".", "errorString", "(", ")", "if", "url_error", ":", "return", "(", "base", "+", "' - {}'", ".", "format", "(", "url_error", ")", ")", "else", ":...
get an error string for a url .
train
false
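A hedged sketch for the get_errstring record, assuming a Qt QUrl (e.g. from PyQt5); the exact reason text comes from Qt and varies by version:

    from PyQt5.QtCore import QUrl
    bad = QUrl('http://[oops')                      # unterminated IPv6 host literal
    print(get_errstring(bad, base='Load failed'))   # e.g. 'Load failed - <Qt reason>'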
17,651
def has_course_ended(end_date): return ((datetime.now(utc) > end_date) if (end_date is not None) else False)
[ "def", "has_course_ended", "(", "end_date", ")", ":", "return", "(", "(", "datetime", ".", "now", "(", "utc", ")", ">", "end_date", ")", "if", "(", "end_date", "is", "not", "None", ")", "else", "False", ")" ]
given a courses end datetime , returns whether the course has ended .
train
false
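A quick check of the has_course_ended record; it assumes, like the module defining it presumably does (e.g. via pytz), that utc is a tzinfo and the argument is timezone-aware:

    from datetime import datetime, timedelta
    from pytz import utc
    print(has_course_ended(datetime.now(utc) - timedelta(days=1)))   # True: end date is past
    print(has_course_ended(None))                                    # False: no end date set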
17,652
def active(display_progress=False): ret = {} client = salt.client.get_local_client(__opts__['conf_file']) try: active_ = client.cmd('*', 'saltutil.running', timeout=__opts__['timeout']) except SaltClientError as client_error: print(client_error) return ret if display_progress: __jid_event__.fire_event({'message': 'Attempting to contact minions: {0}'.format(list(active_.keys()))}, 'progress') for (minion, data) in six.iteritems(active_): if display_progress: __jid_event__.fire_event({'message': 'Received reply from minion {0}'.format(minion)}, 'progress') if (not isinstance(data, list)): continue for job in data: if (not (job['jid'] in ret)): ret[job['jid']] = _format_jid_instance(job['jid'], job) ret[job['jid']].update({'Running': [{minion: job.get('pid', None)}], 'Returned': []}) else: ret[job['jid']]['Running'].append({minion: job['pid']}) mminion = salt.minion.MasterMinion(__opts__) for jid in ret: returner = _get_returner((__opts__['ext_job_cache'], __opts__['master_job_cache'])) data = mminion.returners['{0}.get_jid'.format(returner)](jid) for minion in data: if (minion not in ret[jid]['Returned']): ret[jid]['Returned'].append(minion) return ret
[ "def", "active", "(", "display_progress", "=", "False", ")", ":", "ret", "=", "{", "}", "client", "=", "salt", ".", "client", ".", "get_local_client", "(", "__opts__", "[", "'conf_file'", "]", ")", "try", ":", "active_", "=", "client", ".", "cmd", "(",...
list of tasks currently being executed .
train
true
17,653
def _group_lengths(grouping): from itertools import chain, repeat if (not grouping): return [] elif ((grouping[(-1)] == 0) and (len(grouping) >= 2)): return chain(grouping[:(-1)], repeat(grouping[(-2)])) elif (grouping[(-1)] == _locale.CHAR_MAX): return grouping[:(-1)] else: raise ValueError('unrecognised format for grouping')
[ "def", "_group_lengths", "(", "grouping", ")", ":", "from", "itertools", "import", "chain", ",", "repeat", "if", "(", "not", "grouping", ")", ":", "return", "[", "]", "elif", "(", "(", "grouping", "[", "(", "-", "1", ")", "]", "==", "0", ")", "and"...
convert a localeconv-style grouping into an iterable of integers representing group lengths .
train
false
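Worked examples for the _group_lengths record (islice just truncates the infinite repetition for display; locale.CHAR_MAX is the same constant the snippet reads as _locale.CHAR_MAX):

    import locale
    from itertools import islice
    print(list(islice(_group_lengths([3, 0]), 4)))      # [3, 3, 3, 3]: trailing 0 repeats the last size
    print(list(_group_lengths([3, locale.CHAR_MAX])))   # [3]: CHAR_MAX ends grouping entirely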
17,654
def translate_matrix(m, v): (a, b, c, d, e, f) = m (x, y) = v return (a, b, c, d, (((x * a) + (y * c)) + e), (((x * b) + (y * d)) + f))
[ "def", "translate_matrix", "(", "m", ",", "v", ")", ":", "(", "a", ",", "b", ",", "c", ",", "d", ",", "e", ",", "f", ")", "=", "m", "(", "x", ",", "y", ")", "=", "v", "return", "(", "a", ",", "b", ",", "c", ",", "d", ",", "(", "(", ...
translates a matrix by a vector (x, y) .
train
true
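A quick check of the translate_matrix record against the usual PDF/PostScript affine layout (a, b, c, d, e, f): translating the identity only shifts the offset terms e and f.

    identity = (1, 0, 0, 1, 0, 0)
    print(translate_matrix(identity, (10, 20)))   # (1, 0, 0, 1, 10, 20)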