text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def querymany(self, sql_query, columns, seq_of_parameters):
    """Same as .query() but eventually call the .executemany() method of
    the underlying DBAPI 2.0 cursor instead of .execute().

    :param sql_query: SQL statement template to prepare and execute.
    :param columns: column description forwarded to __preparequery().
    :param seq_of_parameters: sequence of parameter sets, one per row.
    """
    tmp_query = self.__preparequery(sql_query, columns)
    # "qmark" positional placeholders are not handled by the prepare step.
    if self.__methods[METHOD_MODULE].paramstyle == "qmark":
        raise NotImplementedError, "qmark isn't fully supported"
    try:
        self.__dbapi2_cursor.executemany(tmp_query, seq_of_parameters)
    except Exception, e:
        # Any failure is treated as a lost connection: reconnect, grab a
        # fresh raw cursor, and retry exactly once.
        self.__connection.reconnect(tmp_query, self.__log_reconnect)
        self.__dbapi2_cursor = self.__connection._get_raw_cursor()
        self.__dbapi2_cursor.executemany(tmp_query, seq_of_parameters)
[ "def", "querymany", "(", "self", ",", "sql_query", ",", "columns", ",", "seq_of_parameters", ")", ":", "tmp_query", "=", "self", ".", "__preparequery", "(", "sql_query", ",", "columns", ")", "if", "self", ".", "__methods", "[", "METHOD_MODULE", "]", ".", "...
43.176471
25.411765
def new(self, br, ino, sector_count, load_seg, media_name, system_type, platform_id, bootable):
    # type: (headervd.BootRecord, inode.Inode, int, int, str, int, int, bool) -> None
    '''
    A method to create a new El Torito Boot Catalog.

    Parameters:
     br - The boot record that this El Torito Boot Catalog is associated with.
     ino - The Inode to associate with the initial entry.
     sector_count - The number of sectors for the initial entry.
     load_seg - The load segment address of the boot image.
     media_name - The name of the media type, one of 'noemul', 'floppy', or 'hdemul'.
     system_type - The partition type the entry should be.
     platform_id - The platform id to set in the validation entry.
     bootable - Whether this entry should be bootable.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')

    # Create the El Torito validation entry
    self.validation_entry.new(platform_id)

    self.initial_entry.new(sector_count, load_seg, media_name, system_type, bootable)
    self.initial_entry.set_inode(ino)
    # The inode keeps back-references to every record using it.
    ino.linked_records.append(self.initial_entry)

    self.br = br

    self._initialized = True
[ "def", "new", "(", "self", ",", "br", ",", "ino", ",", "sector_count", ",", "load_seg", ",", "media_name", ",", "system_type", ",", "platform_id", ",", "bootable", ")", ":", "# type: (headervd.BootRecord, inode.Inode, int, int, str, int, int, bool) -> None", "if", "se...
42.6875
25.8125
def get_path(self, repo):
    """Resolve the on-disk plugin location for *repo*.

    The last two path components of the repository URL/path are taken as
    the organisation and the plugin name.

    :return: tuple ``(path, org, name)``
    """
    if repo.endswith('.git'):
        # Drop the trailing ".git" suffix (split keeps the part before it).
        repo = repo.split('.git')[0]
    org, name = repo.split('/')[-2:]
    return join(self.plugins_dir, org, name), org, name
[ "def", "get_path", "(", "self", ",", "repo", ")", ":", "if", "repo", ".", "endswith", "(", "'.git'", ")", ":", "repo", "=", "repo", ".", "split", "(", "'.git'", ")", "[", "0", "]", "org", ",", "name", "=", "repo", ".", "split", "(", "'/'", ")",...
34.875
5.625
def http_request(self, path="/", method="GET", host=None, port=None, json=False, data=None):
    """Perform a HTTP request.

    :param path: str, path within the request, e.g. "/api/version"
    :param method: str, HTTP method
    :param host: str, if None, set to 127.0.0.1
    :param port: str or int, if None, set to 8080
    :param json: bool, should we expect json?
        NOTE(review): forwarded verbatim as the ``json=`` argument of
        ``http_session.request``; for a requests-style session that is the
        JSON *body* to send, so the default ``False`` would be serialized
        as a literal ``false`` body -- confirm the session's semantics.
    :param data: data to send (can be dict, list, str)
    :return: dict
    """
    host = host or '127.0.0.1'
    port = port or 8080
    url = get_url(host=host, port=port, path=path)
    return self.http_session.request(method, url, json=json, data=data)
[ "def", "http_request", "(", "self", ",", "path", "=", "\"/\"", ",", "method", "=", "\"GET\"", ",", "host", "=", "None", ",", "port", "=", "None", ",", "json", "=", "False", ",", "data", "=", "None", ")", ":", "host", "=", "host", "or", "'127.0.0.1'...
37.5
19.5
def get_composition(self, composition_id): """Gets the ``Composition`` specified by its ``Id``. arg: composition_id (osid.id.Id): ``Id`` of the ``Composiiton`` return: (osid.repository.Composition) - the composition raise: NotFound - ``composition_id`` not found raise: NullArgument - ``composition_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method is must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find_one( dict({'_id': ObjectId(self._get_id(composition_id, 'repository').get_identifier())}, **self._view_filter())) return objects.Composition(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_composition", "(", "self", ",", "composition_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resource", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'repository'", ...
51.565217
20.043478
def _get_method_full_name(func): """ Return fully qualified function name. This method will attempt to find "full name" of the given function object. This full name is either of the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>" if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2. :param func: a function object. :returns: string with the function's full name as explained above. """ # Python 3.3 already has this information available... if hasattr(func, "__qualname__"): return func.__qualname__ module = inspect.getmodule(func) if module is None: return "?.%s" % getattr(func, "__name__", "?") for cls_name in dir(module): cls = getattr(module, cls_name) if not inspect.isclass(cls): continue for method_name in dir(cls): cls_method = getattr(cls, method_name) if cls_method == func: return "%s.%s" % (cls_name, method_name) if hasattr(func, "__name__"): return "%s.%s" % (module.__name__, func.__name__) return "<unknown>"
[ "def", "_get_method_full_name", "(", "func", ")", ":", "# Python 3.3 already has this information available...", "if", "hasattr", "(", "func", ",", "\"__qualname__\"", ")", ":", "return", "func", ".", "__qualname__", "module", "=", "inspect", ".", "getmodule", "(", ...
40.75
20.178571
def _get_block_result(chars_a, chars_b):
    """Compare the leading digit/letter block of two character lists.

    When ``chars_a`` starts with a digit, a run of consecutive digits is
    popped from the front of both lists; an empty run from ``chars_b``
    (it started with a letter) means ``a`` is newer.  Symmetrically, when
    ``chars_a`` starts with a letter, empty letter output from ``chars_b``
    means ``b`` is newer.  Otherwise the popped blocks are compared with
    :any:`_compare_blocks`.

    :param list chars_a: a list of characters derived from a version string
    :param list chars_b: a list of characters derived from a version string
    :return: 1 (``a`` newer), 0 (equal), or -1 (``b`` newer)
    :rtype: int
    """
    logger.debug('_get_block_result(%s, %s)', chars_a, chars_b)
    if chars_a[0].isdigit():
        pop, result_when_b_empty = _pop_digits, a_newer
    else:
        pop, result_when_b_empty = _pop_letters, b_newer
    block_a = pop(chars_a)
    block_b = pop(chars_b)
    if not block_b:
        logger.debug('blocks are equal')
        return result_when_b_empty
    return _compare_blocks(block_a, block_b)
[ "def", "_get_block_result", "(", "chars_a", ",", "chars_b", ")", ":", "logger", ".", "debug", "(", "'_get_block_result(%s, %s)'", ",", "chars_a", ",", "chars_b", ")", "first_is_digit", "=", "chars_a", "[", "0", "]", ".", "isdigit", "(", ")", "pop_func", "=",...
45.885714
20.228571
def amalgamate(colcount, snode, snptr, snpar, snpost, merge_function):
    """
    Supernodal amalgamation.

       colcount, snode, snptr, snpar, snpost =
           amalgamate(colcount, snode, snptr, snpar, snpost, merge_function)

    PURPOSE
    Iterates over the clique tree in topological order and greedily
    merges a supernode with its parent if

       merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|)

    returns True.

    ARGUMENTS
    colcount       vector with column counts
    snode          vector with supernodes
    snptr          vector with offsets
    snpar          vector with supernodal parent indices
    snpost         vector with supernodal post ordering
    merge_function function

    RETURNS
    colcount       vector with amalgamated column counts
    snode          vector with amalgamated supernodes
    snptr          vector with amalgamated offsets
    snpar          vector with amalgamated supernodal parent indices
    snpost         vector with amalgamated supernodal post ordering
    """
    N = len(snpost)
    # Map each supernode to the list of its children in the clique tree.
    ch = {}
    for j in snpost:
        if snpar[j] in ch:
            ch[snpar[j]].append(j)
        else:
            ch[snpar[j]] = [j]
    snlist = [snode[snptr[k]:snptr[k+1]] for k in range(N)]
    # Presumably cvxopt: unary `+` copies a matrix -- work on copies.
    snpar_ = +snpar
    colcount_ = +colcount
    Ns = N
    for k in snpost:
        if snpar_[k] != k:
            colk = colcount_[snlist[k][0]]
            colp = colcount_[snlist[snpar_[k]][0]]
            nk = len(snlist[k])
            np = len(snlist[snpar_[k]])
            if merge_function and merge_function(colp,colk,np,nk):
                # merge supernode k and snpar[k]
                snlist[snpar_[k]] = matrix(sorted(list(snlist[k]) + list(snlist[snpar_[k]])))
                snlist[k] = None
                colcount_[snlist[snpar_[k]][0]] = colp + nk
                Ns -= 1
                # Re-parent k's children onto the merged supernode.
                if k in ch:
                    for c in ch[k]:
                        snpar_[c] = snpar_[k]
                    ch[snpar_[k]] += ch[k]
                snpar_[k] = k
    # Compact the surviving supernodes and renumber parent indices.
    L = [i for i,s in enumerate(snlist) if s is not None]
    snptr_ = matrix(0,(len(L)+1,1))
    snode_ = +snode
    for i,l in enumerate(L):
        snptr_[i+1] = snptr_[i] + len(snlist[l])
        snode_[snptr_[i]:snptr_[i+1]] = snlist[l]
    snpar_ = snpar_[L]
    for i in range(len(snpar_)):
        snpar_[i] = L.index(snpar_[i])
    snpost_ = post_order(snpar_)
    return colcount_, snode_, snptr_, snpar_, snpost_
[ "def", "amalgamate", "(", "colcount", ",", "snode", ",", "snptr", ",", "snpar", ",", "snpost", ",", "merge_function", ")", ":", "N", "=", "len", "(", "snpost", ")", "ch", "=", "{", "}", "for", "j", "in", "snpost", ":", "if", "snpar", "[", "j", "]...
28.987654
20.444444
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
    """Tell whether *handler* catches the given *error_type*.

    *error_type* may be an exception class (e.g. ``AttributeError``), the
    name of an exception, or a tuple of either.  A bare ``except:``
    (``handler.type`` is falsy) catches everything, so ``True`` is
    returned in that case.
    """
    def as_name(error):
        return error if isinstance(error, str) else error.__name__

    errors = error_type if isinstance(error_type, tuple) else (error_type,)  # type: ignore
    expected = {as_name(error) for error in errors}  # type: ignore
    if not handler.type:
        return True
    return handler.catch(expected)
[ "def", "error_of_type", "(", "handler", ":", "astroid", ".", "ExceptHandler", ",", "error_type", ")", "->", "bool", ":", "def", "stringify_error", "(", "error", ")", ":", "if", "not", "isinstance", "(", "error", ",", "str", ")", ":", "return", "error", "...
35.826087
18.956522
def Brkic_2011_1(Re, eD):
    r'''Calculate the Darcy friction factor using Brkic's (2011) first
    explicit approximation to the Colebrook relation.

    .. math::
        f_d = [-2\log(10^{-0.4343\beta} + \frac{\epsilon}{3.71D})]^{-2}

    .. math::
        \beta = \ln \frac{Re}{1.816\ln\left(\frac{1.1Re}{\ln(1+1.1Re)}\right)}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    No range of validity specified for this equation.

    Examples
    --------
    >>> Brkic_2011_1(1E5, 1E-4)
    0.01812455874141297
    '''
    inner = log(1.1*Re/log(1 + 1.1*Re))
    beta = log(Re/(1.816*inner))
    inv_sqrt_fd = -2*log10(10**(-0.4343*beta) + eD/3.71)
    return inv_sqrt_fd**-2
[ "def", "Brkic_2011_1", "(", "Re", ",", "eD", ")", ":", "beta", "=", "log", "(", "Re", "/", "(", "1.816", "*", "log", "(", "1.1", "*", "Re", "/", "log", "(", "1", "+", "1.1", "*", "Re", ")", ")", ")", ")", "return", "(", "-", "2", "*", "lo...
28.409091
25
def from_apps(cls, apps):
    "Takes in an Apps and returns a VersionedProjectState matching it"
    states = (
        VersionedModelState.from_model(model)
        for model in apps.get_models(include_swapped=True)
    )
    return cls({(state.app_label, state.name.lower()): state for state in states})
[ "def", "from_apps", "(", "cls", ",", "apps", ")", ":", "app_models", "=", "{", "}", "for", "model", "in", "apps", ".", "get_models", "(", "include_swapped", "=", "True", ")", ":", "model_state", "=", "VersionedModelState", ".", "from_model", "(", "model", ...
51.571429
23.571429
def render(self, text, add_header=False):
    """Render *text* as HTML.

    Parameters
    ----------
    text : str
        The raw text to mark up.
    add_header: boolean (default: False)
        If True, add HTML5 header and footer (including this
        renderer's CSS).

    Returns
    -------
    str
        The rendered HTML.
    """
    html = mark_text(text, self.aesthetics, self.rules)
    # Preserve the source text's line breaks in the HTML output.
    html = html.replace('\n', '<br/>')
    if add_header:
        html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
    return html
[ "def", "render", "(", "self", ",", "text", ",", "add_header", "=", "False", ")", ":", "html", "=", "mark_text", "(", "text", ",", "self", ".", "aesthetics", ",", "self", ".", "rules", ")", "html", "=", "html", ".", "replace", "(", "'\\n'", ",", "'<...
27.9
19.2
def defer(self, *args, **kwargs):
    """Call the function and immediately return an asynchronous object.

    The calling code will need to check for the result at a later time using:

    In Python 2/3 using ThreadPools - an AsyncResult
    (https://docs.python.org/2/library/multiprocessing.html#multiprocessing.pool.AsyncResult)
    In Python 3 using Asyncio - a Future
    (https://docs.python.org/3/library/asyncio-task.html#future)

    :param args: positional arguments forwarded to the wrapped function
    :param kwargs: keyword arguments forwarded to the wrapped function
    :return: the pending AsyncResult/Future (created on the first call only)
    """
    LOG.debug(
        '%s on %s (awaitable %s async %s provider %s)',
        'deferring',
        self._func,
        self._is_awaitable,
        self._is_asyncio_provider,
        self._concurrency_provider
    )
    # A call that was already resolved synchronously must not be restarted.
    if self._blocked:
        raise RuntimeError('Already activated this deferred call by blocking on it')
    with self._lock:
        # Launch at most once; later defer() calls return the same handle.
        if not self._deferable:
            func_partial = functools.partial(self._func, *args, **kwargs)
            # we are either:
            # - pure asyncio
            # - asyncio but with blocking function
            # - not asyncio, use threadpool
            self._deferable = (
                # pure asyncio
                asyncio.ensure_future(func_partial(), loop=self._concurrency_provider)
                if self._is_awaitable else (
                    # asyncio blocked
                    self._concurrency_provider.run_in_executor(
                        func=func_partial, executor=None
                    )
                    if self._is_asyncio_provider else (
                        # not asyncio
                        self._concurrency_provider.apply_async(func_partial)
                    )
                )
            )
    return self._deferable
[ "def", "defer", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "debug", "(", "'%s on %s (awaitable %s async %s provider %s)'", ",", "'deferring'", ",", "self", ".", "_func", ",", "self", ".", "_is_awaitable", ",", "self", "."...
37.7
19.7
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
    """Select RPN proposals for a single feature map level.

    Arguments:
        anchors: list[BoxList], one per image in the batch
        objectness: tensor of size N, A, H, W
        box_regression: tensor of size N, A * 4, H, W

    Returns:
        list[BoxList]: per-image proposals carrying an "objectness"
        score field, clipped to the image and filtered by NMS.
    """
    device = objectness.device
    N, A, H, W = objectness.shape

    # put in the same format as anchors
    objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
    objectness = objectness.sigmoid()
    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

    num_anchors = A * H * W

    # Keep only the highest-scoring anchors per image before NMS.
    pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
    objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

    batch_idx = torch.arange(N, device=device)[:, None]
    box_regression = box_regression[batch_idx, topk_idx]

    image_shapes = [box.size for box in anchors]
    concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
    concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

    # Decode regression deltas into absolute xyxy boxes.
    proposals = self.box_coder.decode(
        box_regression.view(-1, 4), concat_anchors.view(-1, 4)
    )
    proposals = proposals.view(N, -1, 4)

    result = []
    for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
        boxlist = BoxList(proposal, im_shape, mode="xyxy")
        boxlist.add_field("objectness", score)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        boxlist = boxlist_nms(
            boxlist,
            self.nms_thresh,
            max_proposals=self.post_nms_top_n,
            score_field="objectness",
        )
        result.append(boxlist)
    return result
[ "def", "forward_for_single_feature_map", "(", "self", ",", "anchors", ",", "objectness", ",", "box_regression", ")", ":", "device", "=", "objectness", ".", "device", "N", ",", "A", ",", "H", ",", "W", "=", "objectness", ".", "shape", "# put in the same format ...
37.625
21.333333
def loads(xml, force_list=None):
    """Build a dictionary from the given XML string.

    The dictionary is keyed by the root node's name, with the root's
    content as the value.  When a node contains child nodes its value is
    a dictionary with one key per child; sibling nodes that share a name
    are collected into a list under that single key.

    Node names listed in *force_list* always have their values stored in
    a list, even when only one such node exists.  For example, parsing
    ``<networkapi><testes><teste>1</teste></testes></networkapi>`` yields
    ``{'networkapi': {'testes': {'teste': 1}}}``, while the same call
    with ``force_list=['teste']`` yields
    ``{'networkapi': {'testes': {'teste': [1]}}}``.

    :param xml: the XML string to parse.
    :param force_list: names of XML nodes whose values must always be
        stored in a list in the returned dictionary.
    :return: dictionary with the XML nodes.
    :raise XMLErrorUtils: if the XML cannot be parsed.
    """
    if force_list is None:
        force_list = []
    try:
        xml = remove_illegal_characters(xml)
        doc = parseString(xml)
    except Exception as e:
        raise XMLErrorUtils(e, u'Falha ao realizar o parse do xml.')
    root = doc.documentElement
    map = dict()
    attrs_map = dict()
    if root.hasAttributes():
        attributes = root.attributes
        for i in range(attributes.length):
            attr = attributes.item(i)
            attrs_map[attr.nodeName] = attr.nodeValue
    # NOTE(review): attrs_map is collected but never used or returned --
    # confirm whether root attributes were meant to be part of the result.
    map[root.nodeName] = _create_childs_map(root, force_list)
    return map
[ "def", "loads", "(", "xml", ",", "force_list", "=", "None", ")", ":", "if", "force_list", "is", "None", ":", "force_list", "=", "[", "]", "try", ":", "xml", "=", "remove_illegal_characters", "(", "xml", ")", "doc", "=", "parseString", "(", "xml", ")", ...
34.642857
26.457143
def iofunctions(self):
    """Input/output functions of the model class.

    Generates the `open_files`, `close_files`, `load_data` and
    `save_data` methods, skipping `load_data`/`save_data` when the model
    has no sequence groups for them to operate on.
    """
    lines = Lines()
    for func in ('open_files', 'close_files', 'load_data', 'save_data'):
        # No input sequences -> nothing to load.
        if ((func == 'load_data') and
                (getattr(self.model.sequences, 'inputs', None) is None)):
            continue
        # Neither flux nor state sequences -> nothing to save.
        if ((func == 'save_data') and
                ((getattr(self.model.sequences, 'fluxes', None) is None) and
                 (getattr(self.model.sequences, 'states', None) is None))):
            continue
        print(' . %s' % func)
        # Only the data-transfer methods release the GIL; only save_data
        # takes the simulation index as an argument.
        nogil = func in ('load_data', 'save_data')
        idx_as_arg = func == 'save_data'
        lines.add(1, method_header(
            func, nogil=nogil, idx_as_arg=idx_as_arg))
        for subseqs in self.model.sequences:
            if func == 'load_data':
                applyfuncs = ('inputs',)
            elif func == 'save_data':
                applyfuncs = ('fluxes', 'states')
            else:
                applyfuncs = ('inputs', 'fluxes', 'states')
            if subseqs.name in applyfuncs:
                if func == 'close_files':
                    lines.add(2, 'self.sequences.%s.%s()'
                                 % (subseqs.name, func))
                else:
                    lines.add(2, 'self.sequences.%s.%s(self.idx_sim)'
                                 % (subseqs.name, func))
    return lines
[ "def", "iofunctions", "(", "self", ")", ":", "lines", "=", "Lines", "(", ")", "for", "func", "in", "(", "'open_files'", ",", "'close_files'", ",", "'load_data'", ",", "'save_data'", ")", ":", "if", "(", "(", "func", "==", "'load_data'", ")", "and", "("...
47.516129
14.935484
def load_texture(self, file_path):
    """Load the image at *file_path* from disk and make it this
    sprite's surface.

    apply_texture() re-centers the origin automatically.
    """
    surface = pygame.image.load(file_path)
    self.image = surface
    self.apply_texture(surface)
[ "def", "load_texture", "(", "self", ",", "file_path", ")", ":", "self", ".", "image", "=", "pygame", ".", "image", ".", "load", "(", "file_path", ")", "self", ".", "apply_texture", "(", "self", ".", "image", ")" ]
52.6
3.4
def plotstuff(self, T=None):
    """
    Create a scatter plot of the contents of the database, with entries
    on the interval T.

    Parameters
    ----------
    T : list
        Time interval ``[start, stop]`` in ms; defaults to [0, 1000].

    Returns
    -------
    None

    See also
    --------
    GDF.select_neurons_interval
    """
    # The mutable default argument ([0, 1000]) was replaced with None to
    # avoid sharing one list object across calls.
    if T is None:
        T = [0, 1000]
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    neurons = self.neurons()
    # One row of dots per neuron, at that neuron's ID on the y-axis.
    for i, x in enumerate(self.select_neurons_interval(neurons, T)):
        ax.plot(x, np.zeros(x.size) + neurons[i], 'o',
                markersize=1,
                markerfacecolor='k',
                markeredgecolor='k',
                alpha=0.25)
    ax.set_xlabel('time (ms)')
    ax.set_ylabel('neuron ID')
    ax.set_xlim(T[0], T[1])
    ax.set_ylim(neurons.min(), neurons.max())
    ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1]))
[ "def", "plotstuff", "(", "self", ",", "T", "=", "[", "0", ",", "1000", "]", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "10", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "neurons", "="...
25.184211
20.552632
def visit_For(self, node):
    """Add CFG nodes and edges for a `for` (or desugared `while`) loop.

    OUT = (node,) + last body statements
    RAISES = body's that are not break or continue
    """
    currs = (node,)
    break_currs = tuple()
    raises = ()
    # handle body
    for n in node.body:
        self.result.add_node(n)
        for curr in currs:
            self.result.add_edge(curr, n)
        currs, nraises = self.visit(n)
        for nraise in nraises:
            if isinstance(nraise, ast.Break):
                # break leaves the loop: it becomes an OUT, not a raise.
                break_currs += (nraise,)
            elif isinstance(nraise, ast.Continue):
                # continue jumps straight back to the loop header.
                self.result.add_edge(nraise, node)
            else:
                raises += (nraise,)
    # add the backward loop
    for curr in currs:
        self.result.add_edge(curr, node)
    # the else statement if needed
    if node.orelse:
        for n in node.orelse:
            self.result.add_node(n)
            for curr in currs:
                self.result.add_edge(curr, n)
            # NOTE(review): nraises from the else-suite is discarded --
            # confirm this is intentional.
            currs, nraises = self.visit(n)
    # while only: a loop with an always-true test can only exit via break.
    if hasattr(node, 'test') and is_true_predicate(node.test):
        return break_currs, raises
    return break_currs + currs, raises
[ "def", "visit_For", "(", "self", ",", "node", ")", ":", "currs", "=", "(", "node", ",", ")", "break_currs", "=", "tuple", "(", ")", "raises", "=", "(", ")", "# handle body", "for", "n", "in", "node", ".", "body", ":", "self", ".", "result", ".", ...
34.472222
10.194444
def status(self):
    """Read this port's `status` attribute.

    In most cases this is the same value as `mode`.  Ports with an
    `auto` mode may additionally report values such as `no-device` or
    `error`; see the individual port driver documentation for the full
    list of possible values.
    """
    attr, value = self.get_attr_string(self._status, 'status')
    self._status = attr
    return value
[ "def", "status", "(", "self", ")", ":", "self", ".", "_status", ",", "value", "=", "self", ".", "get_attr_string", "(", "self", ".", "_status", ",", "'status'", ")", "return", "value" ]
46
22
def _callable_contents(obj):
    """Return the signature contents of a callable Python object.

    Tries, in order: bound/unbound method, callable instance, raw code
    object, and finally a plain function.
    """
    # Test if obj is a method.
    try:
        return _function_contents(obj.__func__)
    except AttributeError:
        pass
    # Test if obj is a callable object.
    try:
        return _function_contents(obj.__call__.__func__)
    except AttributeError:
        pass
    # Test if obj is a code object.
    try:
        return _code_contents(obj)
    except AttributeError:
        # Test if obj is a function object.
        return _function_contents(obj)
[ "def", "_callable_contents", "(", "obj", ")", ":", "try", ":", "# Test if obj is a method.", "return", "_function_contents", "(", "obj", ".", "__func__", ")", "except", "AttributeError", ":", "try", ":", "# Test if obj is a callable object.", "return", "_function_conten...
30
15.6
def get_text_path(self):
    """
    Return the path of the directory containing text files, if such a
    collection exists in this dataset.

    Raises
    ------
    RuntimeError
        If the dataset description contains no text collection.
    """
    for res in self.dsDoc['dataResources']:
        resPath = res['resPath']
        resType = res['resType']
        isCollection = res['isCollection']
        # A text collection is a 'text' resource that is a directory of
        # files rather than a single file.
        if resType == 'text' and isCollection:
            return os.path.join(self.dsHome, resPath)
    # Fixed the copy-pasted error message, which wrongly referred to a
    # missing "learningData file" instead of the text collection.
    raise RuntimeError('could not find a text collection in the dataset')
[ "def", "get_text_path", "(", "self", ")", ":", "for", "res", "in", "self", ".", "dsDoc", "[", "'dataResources'", "]", ":", "resPath", "=", "res", "[", "'resPath'", "]", "resType", "=", "res", "[", "'resType'", "]", "isCollection", "=", "res", "[", "'is...
43.307692
17.615385
def selection_index_to_idx(self, key, selection_index):
    '''Map a ``"mission <n>"`` key and a selection index to a mission
    idx, returning None (and printing a diagnostic) on any bad input.'''
    parts = key.split(' ')
    # Keys must look exactly like "mission <n>".
    if len(parts) != 2 or parts[0] != 'mission':
        print("Bad mission object %s" % key)
        return None
    midx = int(parts[1])
    if not 0 <= midx < len(self.mission_list):
        print("Bad mission index %s" % key)
        return None
    mlist = self.mission_list[midx]
    if not 0 <= selection_index < len(mlist):
        print("Bad mission polygon %s" % selection_index)
        return None
    return mlist[selection_index]
[ "def", "selection_index_to_idx", "(", "self", ",", "key", ",", "selection_index", ")", ":", "a", "=", "key", ".", "split", "(", "' '", ")", "if", "a", "[", "0", "]", "!=", "'mission'", "or", "len", "(", "a", ")", "!=", "2", ":", "print", "(", "\"...
40.125
13.625
def init_opdata(l, from_mod, version=None, is_pypy=False):
    """Sets up a number of the structures found in Python's opcode.py.

    Python opcode.py routines assign attributes to modules. In order to
    do this in a modular way here, the local dictionary for the module
    is passed.

    :param l: the importing module's namespace dictionary.
    :param from_mod: module whose opmap/opname/etc. tables are copied.
    :param version: Python version as a float, e.g. 3.5.
    :param is_pypy: whether this opcode set targets PyPy.
    """
    if version:
        l['python_version'] = version
    l['is_pypy'] = is_pypy
    l['cmp_op'] = cmp_op
    l['HAVE_ARGUMENT'] = HAVE_ARGUMENT
    # 3.6+ switched to 2-byte "wordcode"; pick the matching helpers.
    # NOTE(review): `version <= 3.5` raises TypeError on Python 3 when
    # version is None -- confirm callers always pass a number.
    if version <= 3.5:
        l['findlinestarts'] = findlinestarts
        l['findlabels'] = findlabels
        l['get_jump_targets'] = get_jump_targets
        l['get_jump_target_maps'] = get_jump_target_maps
    else:
        l['findlinestarts'] = wordcode.findlinestarts
        l['findlabels'] = wordcode.findlabels
        l['get_jump_targets'] = wordcode.get_jump_targets
        l['get_jump_target_maps'] = wordcode.get_jump_target_maps
    # Deep-copy mutable tables so later tweaks don't alter from_mod.
    l['opmap'] = deepcopy(from_mod.opmap)
    l['opname'] = deepcopy(from_mod.opname)
    for field in fields2copy:
        l[field] = list(getattr(from_mod, field))
[ "def", "init_opdata", "(", "l", ",", "from_mod", ",", "version", "=", "None", ",", "is_pypy", "=", "False", ")", ":", "if", "version", ":", "l", "[", "'python_version'", "]", "=", "version", "l", "[", "'is_pypy'", "]", "=", "is_pypy", "l", "[", "'cmp...
37.035714
15.464286
def to_name(self) -> str:
    """Return the ANSI color name of this color.

    :return: one of 'black', 'red', 'green', 'yellow', 'blue',
        'magenta', 'cyan' or 'white'
    """
    names = {
        self.BLACK: 'black',
        self.RED: 'red',
        self.GREEN: 'green',
        self.YELLOW: 'yellow',
        self.BLUE: 'blue',
        self.MAGENTA: 'magenta',
        self.CYAN: 'cyan',
        self.WHITE: 'white',
    }
    return names[self.color]
[ "def", "to_name", "(", "self", ")", "->", "str", ":", "return", "{", "self", ".", "BLACK", ":", "'black'", ",", "self", ".", "RED", ":", "'red'", ",", "self", ".", "GREEN", ":", "'green'", ",", "self", ".", "YELLOW", ":", "'yellow'", ",", "self", ...
26.866667
9.266667
def clear_all(self):
    """Delete every Mention from the database.

    Candidates are built from Mentions, so they are deleted as well.
    """
    logger.info("Clearing ALL Mentions.")
    # With no Mentions there can be no Candidates either.
    for table in (Mention, Candidate):
        self.session.query(table).delete(synchronize_session="fetch")
    logger.info("Cleared ALL Mentions (and Candidates).")
[ "def", "clear_all", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Clearing ALL Mentions.\"", ")", "self", ".", "session", ".", "query", "(", "Mention", ")", ".", "delete", "(", "synchronize_session", "=", "\"fetch\"", ")", "# With no Mentions, there shou...
49.5
21.5
def init_ui(self):
    """Build the control bar: game timer, reset button, move counter."""
    layout = QHBoxLayout()
    self.control_layout = layout
    self.setLayout(layout)

    reset = QPushButton()
    reset.setFixedSize(40, 40)
    reset.setIcon(QtGui.QIcon(WIN_PATH))
    self.reset_button = reset

    timer = QLCDNumber()
    timer.setStyleSheet("QLCDNumber {color: red;}")
    timer.setFixedWidth(100)
    self.game_timer = timer

    counter = QLCDNumber()
    counter.setStyleSheet("QLCDNumber {color: red;}")
    counter.setFixedWidth(100)
    self.move_counter = counter

    # Timer on the left, reset in the middle, move counter on the right.
    layout.addWidget(timer)
    layout.addWidget(reset)
    layout.addWidget(counter)
[ "def", "init_ui", "(", "self", ")", ":", "self", ".", "control_layout", "=", "QHBoxLayout", "(", ")", "self", ".", "setLayout", "(", "self", ".", "control_layout", ")", "self", ".", "reset_button", "=", "QPushButton", "(", ")", "self", ".", "reset_button",...
43.941176
11.588235
def register_actions(self, shortcut_manager):
    """Register callback methods for triggered actions.

    :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager:
        Shortcut Manager Object holding mappings between shortcuts and actions.
    """
    super(DescriptionEditorController, self).register_actions(shortcut_manager)
    # Route the "abort" action to this editor's _abort handler
    # (presumably bound to Escape -- confirm against the shortcut map).
    shortcut_manager.add_callback_for_action("abort", self._abort)
[ "def", "register_actions", "(", "self", ",", "shortcut_manager", ")", ":", "super", "(", "DescriptionEditorController", ",", "self", ")", ".", "register_actions", "(", "shortcut_manager", ")", "shortcut_manager", ".", "add_callback_for_action", "(", "\"abort\"", ",", ...
53.25
24.625
def cluster_assignments(self):
    """
    Return the cluster assignments for the most recent set of instances
    clustered.

    :return: the cluster assignments, or None when unavailable
    :rtype: ndarray
    """
    raw = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
    if raw is None:
        return None
    return javabridge.get_env().get_double_array_elements(raw)
[ "def", "cluster_assignments", "(", "self", ")", ":", "array", "=", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"getClusterAssignments\"", ",", "\"()[D\"", ")", "if", "array", "is", "None", ":", "return", "None", "else", ":", "return", "j...
36.083333
21.916667
def eventFilter(self, watchedObject, event):
    """Commit and close the editor when Tab or Backtab is pressed.

    QStyledItemDelegate normally installs this behavior via its own
    event filter, but that filter watches this object rather than the
    sub editor, so the check is replicated here.
    """
    if event.type() != QtCore.QEvent.KeyPress:
        # Not a key press: defer to the default handling.
        return super(AbstractCtiEditor, self).eventFilter(watchedObject, event)
    if event.key() in (Qt.Key_Tab, Qt.Key_Backtab):
        self.commitAndClose()
        return True
    return False
[ "def", "eventFilter", "(", "self", ",", "watchedObject", ",", "event", ")", ":", "if", "event", ".", "type", "(", ")", "==", "QtCore", ".", "QEvent", ".", "KeyPress", ":", "key", "=", "event", ".", "key", "(", ")", "if", "key", "in", "(", "Qt", "...
45.142857
19.571429
def run(self): """ Kill any open Redshift sessions for the given database. """ connection = self.output().connect() # kill any sessions other than ours and # internal Redshift sessions (rdsdb) query = ("select pg_terminate_backend(process) " "from STV_SESSIONS " "where db_name=%s " "and user_name != 'rdsdb' " "and process != pg_backend_pid()") cursor = connection.cursor() logger.info('Killing all open Redshift sessions for database: %s', self.database) try: cursor.execute(query, (self.database,)) cursor.close() connection.commit() except psycopg2.DatabaseError as e: if e.message and 'EOF' in e.message: # sometimes this operation kills the current session. # rebuild the connection. Need to pause for 30-60 seconds # before Redshift will allow us back in. connection.close() logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds) time.sleep(self.connection_reset_wait_seconds) logger.info('Reconnecting to Redshift') connection = self.output().connect() else: raise try: self.output().touch(connection) connection.commit() finally: connection.close() logger.info('Done killing all open Redshift sessions for database: %s', self.database)
[ "def", "run", "(", "self", ")", ":", "connection", "=", "self", ".", "output", "(", ")", ".", "connect", "(", ")", "# kill any sessions other than ours and", "# internal Redshift sessions (rdsdb)", "query", "=", "(", "\"select pg_terminate_backend(process) \"", "\"from ...
41.5
18.131579
def submit_job(manager, job_config): """ Launch new job from specified config. May have been previously 'setup' if 'setup_params' in job_config is empty. """ # job_config is raw dictionary from JSON (from MQ or HTTP endpoint). job_id = job_config.get('job_id') try: command_line = job_config.get('command_line') setup_params = job_config.get('setup_params', {}) force_setup = job_config.get('setup') remote_staging = job_config.get('remote_staging', {}) dependencies_description = job_config.get('dependencies_description', None) env = job_config.get('env', []) submit_params = job_config.get('submit_params', {}) touch_outputs = job_config.get('touch_outputs', []) job_config = None if setup_params or force_setup: input_job_id = setup_params.get("job_id", job_id) tool_id = setup_params.get("tool_id", None) tool_version = setup_params.get("tool_version", None) use_metadata = setup_params.get("use_metadata", False) job_config = setup_job( manager, input_job_id, tool_id, tool_version, use_metadata, ) if job_config is not None: job_directory = job_config["job_directory"] jobs_directory = os.path.abspath(os.path.join(job_directory, os.pardir)) command_line = command_line.replace('__PULSAR_JOBS_DIRECTORY__', jobs_directory) # TODO: Handle __PULSAR_JOB_DIRECTORY__ config files, metadata files, etc... manager.touch_outputs(job_id, touch_outputs) launch_config = { "remote_staging": remote_staging, "command_line": command_line, "dependencies_description": dependencies_description, "submit_params": submit_params, "env": env, "setup_params": setup_params, } manager.preprocess_and_launch(job_id, launch_config) except Exception: manager.handle_failure_before_launch(job_id) raise
[ "def", "submit_job", "(", "manager", ",", "job_config", ")", ":", "# job_config is raw dictionary from JSON (from MQ or HTTP endpoint).", "job_id", "=", "job_config", ".", "get", "(", "'job_id'", ")", "try", ":", "command_line", "=", "job_config", ".", "get", "(", "...
42.285714
18.020408
def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a dataset and all of its contents. Returns empty response in the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "delete_dataset" not in self._inner_api_calls: self._inner_api_calls[ "delete_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs["DeleteDataset"].retry, default_timeout=self._method_configs["DeleteDataset"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) operation = self._inner_api_calls["delete_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, )
[ "def", "delete_dataset", "(", "self", ",", "name", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadat...
39.423529
22.364706
def download(url, fname=None): """ Downloads a file. Args: url (str): The URL to download. fname (Optional[str]): The filename to store the downloaded file in. If `None`, take the filename from the URL. Defaults to `None`. Returns: The filename the URL was downloaded to. Raises: requests.exceptions.HTTPError: There was a problem connecting to the URL. """ # Determine the filename if fname is None: fname = url.split('/')[-1] # Stream the URL as a file, copying to local disk with contextlib.closing(requests.get(url, stream=True)) as r: try: r.raise_for_status() except requests.exceptions.HTTPError as error: print('Error connecting to URL: "{}"'.format(url)) print(r.text) raise error with open(fname, 'wb') as f: shutil.copyfileobj(r.raw, f) return fname
[ "def", "download", "(", "url", ",", "fname", "=", "None", ")", ":", "# Determine the filename", "if", "fname", "is", "None", ":", "fname", "=", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "# Stream the URL as a file, copying to local disk", "wi...
28.121212
21.333333
def swipe_bottom(self, steps=10, *args, **selectors): """ Swipe the UI object with *selectors* from center to bottom See `Swipe Left` for more details. """ self.device(**selectors).swipe.down(steps=steps)
[ "def", "swipe_bottom", "(", "self", ",", "steps", "=", "10", ",", "*", "args", ",", "*", "*", "selectors", ")", ":", "self", ".", "device", "(", "*", "*", "selectors", ")", ".", "swipe", ".", "down", "(", "steps", "=", "steps", ")" ]
34.142857
13.857143
def _dictlist_to_lists(dl, *keys): ''' convert a list of dictionaries to a dictionary of lists >>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444}, {'a': 'wow', 'b': 300}] >>> _dictlist_to_lists(dl) (['test', 'zaz', 'wow'], [3, 444, 300]) ''' lists = [] for k in keys: lists.append([]) for item in dl: for i, key in enumerate(keys): x = item[key] if isinstance(x, unicode): x = str(x) lists[i].append(x) return lists
[ "def", "_dictlist_to_lists", "(", "dl", ",", "*", "keys", ")", ":", "lists", "=", "[", "]", "for", "k", "in", "keys", ":", "lists", ".", "append", "(", "[", "]", ")", "for", "item", "in", "dl", ":", "for", "i", ",", "key", "in", "enumerate", "(...
29
16.111111
def get_cloud_service(self, cloud_service_id): ''' The Get Cloud Service operation gets all the resources (job collections) in the cloud service. cloud_service_id: The cloud service id ''' _validate_not_none('cloud_service_id', cloud_service_id) path = self._get_cloud_services_path(cloud_service_id) return self._perform_get(path, CloudService)
[ "def", "get_cloud_service", "(", "self", ",", "cloud_service_id", ")", ":", "_validate_not_none", "(", "'cloud_service_id'", ",", "cloud_service_id", ")", "path", "=", "self", ".", "_get_cloud_services_path", "(", "cloud_service_id", ")", "return", "self", ".", "_pe...
37.454545
21.454545
def create_table(table, connection, schema=None): """Create a single table, primarily used din migrations""" orig_schemas = {} # These schema shenanigans are almost certainly wrong. # But they are expedient. For Postgres, it puts the library # tables in the Library schema. We need to change the schema for all tables in case # the table we are creating references another table if schema: connection.execute("SET search_path TO {}".format(schema)) for table in ALL_TABLES: orig_schemas[table.__table__] = table.__table__.schema table.__table__.schema = schema table.__table__.create(bind=connection.engine) # We have to put the schemas back because when installing to a warehouse. # the same library classes can be used to access a Sqlite database, which # does not handle schemas. if schema: for it, orig_schema in list(orig_schemas.items()): it.schema = orig_schema
[ "def", "create_table", "(", "table", ",", "connection", ",", "schema", "=", "None", ")", ":", "orig_schemas", "=", "{", "}", "# These schema shenanigans are almost certainly wrong.", "# But they are expedient. For Postgres, it puts the library", "# tables in the Library schema. W...
42.916667
24.333333
def list_results(self, number, username): """ [deprecated] 建議使用方法 `get_question_results()` """ # 取得新 API 的結果 data = self.get_question_results(number, username) # 實作相容的結構 result = [] for number in data: # 儲存題目資訊 result += [(number, data[number])] # 回傳結果 return result
[ "def", "list_results", "(", "self", ",", "number", ",", "username", ")", ":", "# 取得新 API 的結果", "data", "=", "self", ".", "get_question_results", "(", "number", ",", "username", ")", "# 實作相容的結構", "result", "=", "[", "]", "for", "number", "in", "data", ":", ...
27.538462
13.692308
def load(self, format=None, *, kwargs={}): ''' deserialize object from the file. auto detect format by file extension name if `format` is None. for example, `.json` will detect as `json`. * raise `FormatNotFoundError` on unknown format. * raise `SerializeError` on any serialize exceptions. ''' return load(self, format=format, kwargs=kwargs)
[ "def", "load", "(", "self", ",", "format", "=", "None", ",", "*", ",", "kwargs", "=", "{", "}", ")", ":", "return", "load", "(", "self", ",", "format", "=", "format", ",", "kwargs", "=", "kwargs", ")" ]
36.181818
21.272727
def reasonable_desired_version(self, desired_version, allow_equal=False, allow_patch_skip=False): """ Determine whether the desired version is a reasonable next version. Parameters ---------- desired_version: str the proposed next version name """ try: desired_version = desired_version.base_version except: pass (new_major, new_minor, new_patch) = \ map(int, desired_version.split('.')) tag_versions = self._versions_from_tags() if not tag_versions: # no tags yet, and legal version is legal! return "" max_version = max(self._versions_from_tags()).base_version (old_major, old_minor, old_patch) = \ map(int, str(max_version).split('.')) update_str = str(max_version) + " -> " + str(desired_version) v_desired = vers.Version(desired_version) v_max = vers.Version(max_version) if allow_equal and v_desired == v_max: return "" if v_desired < v_max: return ("Bad update: New version doesn't increase on last tag: " + update_str + "\n") bad_update = skipped_version((old_major, old_minor, old_patch), (new_major, new_minor, new_patch), allow_patch_skip) msg = "" if bad_update: msg = ("Bad update: Did you skip a version from " + update_str + "?\n") return msg
[ "def", "reasonable_desired_version", "(", "self", ",", "desired_version", ",", "allow_equal", "=", "False", ",", "allow_patch_skip", "=", "False", ")", ":", "try", ":", "desired_version", "=", "desired_version", ".", "base_version", "except", ":", "pass", "(", "...
33.425532
20.957447
def _serialize_input_list(input_value): """Recursively serialize task input list""" input_list = [] for item in input_value: if isinstance(item, list): input_list.append(Task._serialize_input_list(item)) else: if isinstance(item, File): item = Task._to_api_file_format(item) input_list.append(item) return input_list
[ "def", "_serialize_input_list", "(", "input_value", ")", ":", "input_list", "=", "[", "]", "for", "item", "in", "input_value", ":", "if", "isinstance", "(", "item", ",", "list", ")", ":", "input_list", ".", "append", "(", "Task", ".", "_serialize_input_list"...
39.090909
10.272727
def modified_lines(self, r, file_name): """Returns the line numbers of a file which have been changed.""" cmd = self.file_diff_cmd(r, file_name) diff = shell_out_ignore_exitcode(cmd, cwd=self.root) return list(self.modified_lines_from_diff(diff))
[ "def", "modified_lines", "(", "self", ",", "r", ",", "file_name", ")", ":", "cmd", "=", "self", ".", "file_diff_cmd", "(", "r", ",", "file_name", ")", "diff", "=", "shell_out_ignore_exitcode", "(", "cmd", ",", "cwd", "=", "self", ".", "root", ")", "ret...
54.8
8.6
def skew_matrix(w): '''Return the skew matrix of a direction w.''' return np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]])
[ "def", "skew_matrix", "(", "w", ")", ":", "return", "np", ".", "array", "(", "[", "[", "0", ",", "-", "w", "[", "2", "]", ",", "w", "[", "1", "]", "]", ",", "[", "w", "[", "2", "]", ",", "0", ",", "-", "w", "[", "0", "]", "]", ",", ...
36.8
7.2
def bezout(a, b): '''Compute the bezout algorithm of a and b, i.e. it returns u, v, p such as: p = GCD(a,b) a * u + b * v = p Copied from http://www.labri.fr/perso/betrema/deug/poly/euclide.html. ''' u = 1 v = 0 s = 0 t = 1 while b > 0: q = a // b r = a % b a = b b = r tmp = s s = u - q * s u = tmp tmp = t t = v - q * t v = tmp return u, v, a
[ "def", "bezout", "(", "a", ",", "b", ")", ":", "u", "=", "1", "v", "=", "0", "s", "=", "0", "t", "=", "1", "while", "b", ">", "0", ":", "q", "=", "a", "//", "b", "r", "=", "a", "%", "b", "a", "=", "b", "b", "=", "r", "tmp", "=", "...
19.25
27.083333
def untar_file(filename, location): """Untar the file (tar file located at filename) to the destination location""" if not os.path.exists(location): os.makedirs(location) if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): mode = 'r:gz' elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'): mode = 'r:bz2' elif filename.lower().endswith('.tar'): mode = 'r' else: logger.warn('Cannot determine compression type for file %s' % filename) mode = 'r:*' tar = tarfile.open(filename, mode) try: # note: python<=2.5 doesnt seem to know about pax headers, filter them leading = has_leading_dir([ member.name for member in tar.getmembers() if member.name != 'pax_global_header' ]) for member in tar.getmembers(): fn = member.name if fn == 'pax_global_header': continue if leading: fn = split_leading_dir(fn)[1] path = os.path.join(location, fn) if member.isdir(): if not os.path.exists(path): os.makedirs(path) else: try: fp = tar.extractfile(member) except (KeyError, AttributeError): e = sys.exc_info()[1] # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warn( 'In the tar file %s the member %s is invalid: %s' % (filename, member.name, e)) continue if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) destfp = open(path, 'wb') try: shutil.copyfileobj(fp, destfp) finally: destfp.close() fp.close() finally: tar.close()
[ "def", "untar_file", "(", "filename", ",", "location", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "location", ")", ":", "os", ".", "makedirs", "(", "location", ")", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", ...
39.156863
15.019608
def get_all_cpv_use(cp): ''' .. versionadded:: 2015.8.0 Uses portage to determine final USE flags and settings for an emerge. @type cp: string @param cp: eg cat/pkg @rtype: lists @return use, use_expand_hidden, usemask, useforce ''' cpv = _get_cpv(cp) portage = _get_portage() use = None _porttree().dbapi.settings.unlock() try: _porttree().dbapi.settings.setcpv(cpv, mydb=portage.portdb) use = portage.settings['PORTAGE_USE'].split() use_expand_hidden = portage.settings["USE_EXPAND_HIDDEN"].split() usemask = list(_porttree().dbapi.settings.usemask) useforce = list(_porttree().dbapi.settings.useforce) except KeyError: _porttree().dbapi.settings.reset() _porttree().dbapi.settings.lock() return [], [], [], [] # reset cpv filter _porttree().dbapi.settings.reset() _porttree().dbapi.settings.lock() return use, use_expand_hidden, usemask, useforce
[ "def", "get_all_cpv_use", "(", "cp", ")", ":", "cpv", "=", "_get_cpv", "(", "cp", ")", "portage", "=", "_get_portage", "(", ")", "use", "=", "None", "_porttree", "(", ")", ".", "dbapi", ".", "settings", ".", "unlock", "(", ")", "try", ":", "_porttree...
33.103448
18.827586
def AddIndex(self, path_segment_index): """Adds a path segment index and sets its weight to 0. Args: path_segment_index: an integer containing the path segment index. Raises: ValueError: if the path segment weights already contains the path segment index. """ if path_segment_index in self._weight_per_index: raise ValueError('Path segment index already set.') self._weight_per_index[path_segment_index] = 0
[ "def", "AddIndex", "(", "self", ",", "path_segment_index", ")", ":", "if", "path_segment_index", "in", "self", ".", "_weight_per_index", ":", "raise", "ValueError", "(", "'Path segment index already set.'", ")", "self", ".", "_weight_per_index", "[", "path_segment_ind...
32.642857
19.571429
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): """ Check if `path` is a work-rooted path, and convert to a relative final-rooted path """ if not path or not isinstance(path, str): return path upload_path = None # First, check in the mapping: if it's there is a direct reference and # it's a file, we immediately return it (saves lots of iterations) if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: # Not a file: check for elements in the mapping that contain # it paths_to_check = [key for key in upload_path_mapping if path.startswith(key)] if paths_to_check: for work_path in paths_to_check: if os.path.isdir(work_path): final_path = upload_path_mapping[work_path] upload_path = path.replace(work_path, final_path) break if upload_path is not None: return os.path.relpath(upload_path, upload_base_dir) else: return None
[ "def", "_work_path_to_rel_final_path", "(", "path", ",", "upload_path_mapping", ",", "upload_base_dir", ")", ":", "if", "not", "path", "or", "not", "isinstance", "(", "path", ",", "str", ")", ":", "return", "path", "upload_path", "=", "None", "# First, check in ...
39.785714
21.178571
def _default_bridge(self): """ Get an instance of the ENBridge object using ctypes. """ objc = self.objc ENBridge = objc.objc_getClass('ENBridge') return objc.objc_msgSend(ENBridge, objc.sel_registerName('instance'))
[ "def", "_default_bridge", "(", "self", ")", ":", "objc", "=", "self", ".", "objc", "ENBridge", "=", "objc", ".", "objc_getClass", "(", "'ENBridge'", ")", "return", "objc", ".", "objc_msgSend", "(", "ENBridge", ",", "objc", ".", "sel_registerName", "(", "'i...
48.8
15.2
def _try_switches(self, lines, index): """ For each switch in the Collector object, pass a list of string, representing lines of text in a file, and an index to the current line to try to flip the switch. A switch will only flip on if the line passes its 'test_on' method, and will only flip off if the line passes its 'test_off' method. :param lines: List of strings, usually the lines in a text file :param index: Number index pointing to the current line """ for s in self._switches: s.switch(lines, index)
[ "def", "_try_switches", "(", "self", ",", "lines", ",", "index", ")", ":", "for", "s", "in", "self", ".", "_switches", ":", "s", ".", "switch", "(", "lines", ",", "index", ")" ]
45.384615
18.923077
def write_object_to_file(self, query_results, filename, fmt="csv", coerce_to_timestamp=False, record_time_added=False): """ Write query results to file. Acceptable formats are: - csv: comma-separated-values file. This is the default format. - json: JSON array. Each element in the array is a different row. - ndjson: JSON array but each element is new-line delimited instead of comma delimited like in `json` This requires a significant amount of cleanup. Pandas doesn't handle output to CSV and json in a uniform way. This is especially painful for datetime types. Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps. By default, this function will try and leave all values as they are represented in Salesforce. You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC). This is can be greatly beneficial as it will make all of your datetime fields look the same, and makes it easier to work with in other database environments :param query_results: the results from a SQL query :type query_results: list of dict :param filename: the name of the file where the data should be dumped to :type filename: str :param fmt: the format you want the output in. Default: 'csv' :type fmt: str :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps. False if you want them to be left in the same format as they were in Salesforce. Leaving the value as False will result in datetimes being strings. Default: False :type coerce_to_timestamp: bool :param record_time_added: True if you want to add a Unix timestamp field to the resulting data that marks when the data was fetched from Salesforce. Default: False :type record_time_added: bool :return: the dataframe that gets written to the file. 
:rtype: pd.Dataframe """ fmt = fmt.lower() if fmt not in ['csv', 'json', 'ndjson']: raise ValueError("Format value is not recognized: {}".format(fmt)) # this line right here will convert all integers to floats # if there are any None/np.nan values in the column # that's because None/np.nan cannot exist in an integer column # we should write all of our timestamps as FLOATS in our final schema df = pd.DataFrame.from_records(query_results, exclude=["attributes"]) df.columns = [column.lower() for column in df.columns] # convert columns with datetime strings to datetimes # not all strings will be datetimes, so we ignore any errors that occur # we get the object's definition at this point and only consider # features that are DATE or DATETIME if coerce_to_timestamp and df.shape[0] > 0: # get the object name out of the query results # it's stored in the "attributes" dictionary # for each returned record object_name = query_results[0]['attributes']['type'] self.log.info("Coercing timestamps for: %s", object_name) schema = self.describe_object(object_name) # possible columns that can be converted to timestamps # are the ones that are either date or datetime types # strings are too general and we risk unintentional conversion possible_timestamp_cols = [ field['name'].lower() for field in schema['fields'] if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns ] df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp) if record_time_added: fetched_time = time.time() df["time_fetched_from_salesforce"] = fetched_time # write the CSV or JSON file depending on the option # NOTE: # datetimes here are an issue. 
# There is no good way to manage the difference # for to_json, the options are an epoch or a ISO string # but for to_csv, it will be a string output by datetime # For JSON we decided to output the epoch timestamp in seconds # (as is fairly standard for JavaScript) # And for csv, we do a string if fmt == "csv": # there are also a ton of newline objects that mess up our ability to write to csv # we remove these newlines so that the output is a valid CSV format self.log.info("Cleaning data and writing to CSV") possible_strings = df.columns[df.dtypes == "object"] df[possible_strings] = df[possible_strings].apply( lambda x: x.str.replace("\r\n", "").str.replace("\n", "") ) # write the dataframe df.to_csv(filename, index=False) elif fmt == "json": df.to_json(filename, "records", date_unit="s") elif fmt == "ndjson": df.to_json(filename, "records", lines=True, date_unit="s") return df
[ "def", "write_object_to_file", "(", "self", ",", "query_results", ",", "filename", ",", "fmt", "=", "\"csv\"", ",", "coerce_to_timestamp", "=", "False", ",", "record_time_added", "=", "False", ")", ":", "fmt", "=", "fmt", ".", "lower", "(", ")", "if", "fmt...
49.231481
25.305556
def plot_monthly_ic_heatmap(mean_monthly_ic, ax=None): """ Plots a heatmap of the information coefficient or returns by month. Parameters ---------- mean_monthly_ic : pd.DataFrame The mean monthly IC for N periods forward. Returns ------- ax : matplotlib.Axes The axes that were plotted on. """ mean_monthly_ic = mean_monthly_ic.copy() num_plots = len(mean_monthly_ic.columns) v_spaces = ((num_plots - 1) // 3) + 1 if ax is None: f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6)) ax = ax.flatten() new_index_year = [] new_index_month = [] for date in mean_monthly_ic.index: new_index_year.append(date.year) new_index_month.append(date.month) mean_monthly_ic.index = pd.MultiIndex.from_arrays( [new_index_year, new_index_month], names=["year", "month"]) for a, (periods_num, ic) in zip(ax, mean_monthly_ic.iteritems()): sns.heatmap( ic.unstack(), annot=True, alpha=1.0, center=0.0, annot_kws={"size": 7}, linewidths=0.01, linecolor='white', cmap=cm.coolwarm_r, cbar=False, ax=a) a.set(ylabel='', xlabel='') a.set_title("Monthly Mean {} Period IC".format(periods_num)) if num_plots < len(ax): ax[-1].set_visible(False) return ax
[ "def", "plot_monthly_ic_heatmap", "(", "mean_monthly_ic", ",", "ax", "=", "None", ")", ":", "mean_monthly_ic", "=", "mean_monthly_ic", ".", "copy", "(", ")", "num_plots", "=", "len", "(", "mean_monthly_ic", ".", "columns", ")", "v_spaces", "=", "(", "(", "nu...
24.857143
19.964286
def recv_file_from_remote(dev, src_filename, dst_file, filesize): """Intended to be passed to the `remote` function as the xfer_func argument. Matches up with send_file_to_host. """ bytes_remaining = filesize if not HAS_BUFFER: bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray(buf_size) while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf_remaining = read_size buf_index = 0 while buf_remaining > 0: read_buf = dev.read(buf_remaining) bytes_read = len(read_buf) if bytes_read: write_buf[buf_index:bytes_read] = read_buf[0:bytes_read] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER: dst_file.write(write_buf[0:read_size]) else: dst_file.write(binascii.unhexlify(write_buf[0:read_size])) # Send an ack to the remote as a form of flow control dev.write(b'\x06') # ASCII ACK is 0x06 bytes_remaining -= read_size
[ "def", "recv_file_from_remote", "(", "dev", ",", "src_filename", ",", "dst_file", ",", "filesize", ")", ":", "bytes_remaining", "=", "filesize", "if", "not", "HAS_BUFFER", ":", "bytes_remaining", "*=", "2", "# hexlify makes each byte into 2", "buf_size", "=", "BUFFE...
40.666667
12.037037
def inverse(x): """ Transform to Timedelta from numerical format """ try: x = [pd.Timedelta(int(i)) for i in x] except TypeError: x = pd.Timedelta(int(x)) return x
[ "def", "inverse", "(", "x", ")", ":", "try", ":", "x", "=", "[", "pd", ".", "Timedelta", "(", "int", "(", "i", ")", ")", "for", "i", "in", "x", "]", "except", "TypeError", ":", "x", "=", "pd", ".", "Timedelta", "(", "int", "(", "x", ")", ")...
25.222222
13
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True): """ Returns an iterator of the contents contents of a file at a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path of the file. * offset_bytes: Optional. specifies a number of bytes that should be skipped in the beginning of the file. * size_bytes: Optional. limits the total amount of data returned, note you will get fewer bytes than size if you pass a value larger than the size of the file. If size is set to 0 then all of the data will be returned. * extract_value: If True, then an ExtractValueIterator will be return, which will iterate over the bytes of the file. If False, then the protobuf response iterator will return. """ req = proto.GetFileRequest( file=proto.File(commit=commit_from(commit), path=path), offset_bytes=offset_bytes, size_bytes=size_bytes ) res = self.stub.GetFile(req, metadata=self.metadata) if extract_value: return ExtractValueIterator(res) return res
[ "def", "get_file", "(", "self", ",", "commit", ",", "path", ",", "offset_bytes", "=", "0", ",", "size_bytes", "=", "0", ",", "extract_value", "=", "True", ")", ":", "req", "=", "proto", ".", "GetFileRequest", "(", "file", "=", "proto", ".", "File", "...
46.615385
22.153846
def create_comment(self, body): """Create a comment on this issue. :param str body: (required), comment body :returns: :class:`IssueComment <github3.issues.comment.IssueComment>` """ json = None if body: url = self._build_url('comments', base_url=self._api) json = self._json(self._post(url, data={'body': body}), 201) return IssueComment(json, self) if json else None
[ "def", "create_comment", "(", "self", ",", "body", ")", ":", "json", "=", "None", "if", "body", ":", "url", "=", "self", ".", "_build_url", "(", "'comments'", ",", "base_url", "=", "self", ".", "_api", ")", "json", "=", "self", ".", "_json", "(", "...
39
17.916667
def unique(s): """Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time. """ n = len(s) if n == 0: return [] # Try using a dict first, as that's the fastest and will usually # work. If it doesn't work, it will usually fail quickly, so it # usually doesn't cost much to *try* it. It requires that all the # sequence elements be hashable, and support equality comparison. u = {} try: for x in s: u[x] = 1 except TypeError: pass # move on to the next method else: return list(u.keys()) del u # We can't hash all the elements. Second fastest is to sort, # which brings the equal elements together; then duplicates are # easy to weed out in a single pass. # NOTE: Python's list.sort() was designed to be efficient in the # presence of many duplicate elements. This isn't true of all # sort functions in all languages or libraries, so this approach # is more effective in Python than it may be elsewhere. try: t = sorted(s) except TypeError: pass # move on to the next method else: assert n > 0 last = t[0] lasti = i = 1 while i < n: if t[i] != last: t[lasti] = last = t[i] lasti = lasti + 1 i = i + 1 return t[:lasti] del t # Brute force is all that's left. u = [] for x in s: if x not in u: u.append(x) return u
[ "def", "unique", "(", "s", ")", ":", "n", "=", "len", "(", "s", ")", "if", "n", "==", "0", ":", "return", "[", "]", "# Try using a dict first, as that's the fastest and will usually", "# work. If it doesn't work, it will usually fail quickly, so it", "# usually doesn't c...
31.823529
23.014706
def from_path_by_size(dir_path, min_size=0, max_size=1 << 40): """Create a new FileCollection, and select all files that size in a range:: dir_path = "your/path" # select by file size larger than 100MB fc = FileCollection.from_path_by_size( dir_path, min_size=100*1024*1024) # select by file size smaller than 100MB fc = FileCollection.from_path_by_size( dir_path, max_size=100*1024*1024) # select by file size from 1MB to 100MB fc = FileCollection.from_path_by_size( dir_path, min_size=1024*1024, max_size=100*1024*1024) """ def filter(winfile): if (winfile.size_on_disk >= min_size) and \ (winfile.size_on_disk <= max_size): return True else: return False return FileCollection.from_path_by_criterion( dir_path, filter, keepboth=False)
[ "def", "from_path_by_size", "(", "dir_path", ",", "min_size", "=", "0", ",", "max_size", "=", "1", "<<", "40", ")", ":", "def", "filter", "(", "winfile", ")", ":", "if", "(", "winfile", ".", "size_on_disk", ">=", "min_size", ")", "and", "(", "winfile",...
37.925926
15.333333
def _schema_options(p): """ Add options specific to schema subcommand. """ p.add_argument( 'resource', metavar='selector', nargs='?', default=None).completer = _schema_tab_completer p.add_argument( '--summary', action="store_true", help="Summarize counts of available resources, actions and filters") p.add_argument('--json', action="store_true", help=argparse.SUPPRESS) p.add_argument("-v", "--verbose", action="count", help="Verbose logging") p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS) p.add_argument("--debug", default=False, help=argparse.SUPPRESS)
[ "def", "_schema_options", "(", "p", ")", ":", "p", ".", "add_argument", "(", "'resource'", ",", "metavar", "=", "'selector'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ")", ".", "completer", "=", "_schema_tab_completer", "p", ".", "add_argumen...
48.461538
22.615385
def parse(self, fail_callback=None): """ Parse text fields and file fields for values and files """ # get text fields for field in self.field_arguments: self.values[field['name']] = self.__get_value(field['name']) if self.values[field['name']] is None and field['required']: if fail_callback is not None: fail_callback() self.__invalid_request(field['error']) # get file fields for file in self.file_arguments: self.files[file['name']] = self.__get_file(file) if self.files[file['name']] is None and file['required']: if fail_callback is not None: fail_callback() self.__invalid_request(file['error'])
[ "def", "parse", "(", "self", ",", "fail_callback", "=", "None", ")", ":", "# get text fields\r", "for", "field", "in", "self", ".", "field_arguments", ":", "self", ".", "values", "[", "field", "[", "'name'", "]", "]", "=", "self", ".", "__get_value", "("...
49.5625
12.5
def _get_client(self): """ S3 Boto3 client Returns: boto3.session.Session.client: client """ client_kwargs = self._storage_parameters.get('client', dict()) # Handles unsecure mode if self._unsecure: client_kwargs = client_kwargs.copy() client_kwargs['use_ssl'] = False return self._get_session().client("s3", **client_kwargs)
[ "def", "_get_client", "(", "self", ")", ":", "client_kwargs", "=", "self", ".", "_storage_parameters", ".", "get", "(", "'client'", ",", "dict", "(", ")", ")", "# Handles unsecure mode", "if", "self", ".", "_unsecure", ":", "client_kwargs", "=", "client_kwargs...
27.6
18.4
def text(files): '''Returns the whole transcribed text''' sentences = convert_timestamps(files) out = [] for s in sentences: out.append(' '.join([w[0] for w in s['words']])) return '\n'.join(out)
[ "def", "text", "(", "files", ")", ":", "sentences", "=", "convert_timestamps", "(", "files", ")", "out", "=", "[", "]", "for", "s", "in", "sentences", ":", "out", ".", "append", "(", "' '", ".", "join", "(", "[", "w", "[", "0", "]", "for", "w", ...
31
15
def find_range(self, interval): """wrapper for find""" return self.find(self.tree, interval, self.start, self.end)
[ "def", "find_range", "(", "self", ",", "interval", ")", ":", "return", "self", ".", "find", "(", "self", ".", "tree", ",", "interval", ",", "self", ".", "start", ",", "self", ".", "end", ")" ]
42.666667
12
def ystep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. """ self.Y = np.asarray(sp.prox_l2( self.AX + self.U, (self.lmbda/self.rho)*self.Wtvna, axis=self.saxes), dtype=self.dtype)
[ "def", "ystep", "(", "self", ")", ":", "self", ".", "Y", "=", "np", ".", "asarray", "(", "sp", ".", "prox_l2", "(", "self", ".", "AX", "+", "self", ".", "U", ",", "(", "self", ".", "lmbda", "/", "self", ".", "rho", ")", "*", "self", ".", "W...
32.5
13.5
def _find_files(dirpath: str) -> 'Iterable[str]': """Find files recursively. Returns a generator that yields paths in no particular order. """ for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
[ "def", "_find_files", "(", "dirpath", ":", "str", ")", "->", "'Iterable[str]'", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dirpath", ",", "topdown", "=", "True", ",", "followlinks", "=", "True", ")", ":", "...
40.909091
15.545455
def install_twisted(): """ If twisted is available, make `emit' return a DeferredList This has been successfully tested with Twisted 14.0 and later. """ global emit, _call_partial try: from twisted.internet import defer emit = _emit_twisted _call_partial = defer.maybeDeferred return True except ImportError: _call_partial = lambda fn, *a, **kw: fn(*a, **kw) return False
[ "def", "install_twisted", "(", ")", ":", "global", "emit", ",", "_call_partial", "try", ":", "from", "twisted", ".", "internet", "import", "defer", "emit", "=", "_emit_twisted", "_call_partial", "=", "defer", ".", "maybeDeferred", "return", "True", "except", "...
28.933333
16
def draw(self, **kwargs): """ Called from the fit method, this method creates the canvas and draws the distribution plot on it. Parameters ---------- kwargs: generic keyword arguments. """ # Prepare the data bins = np.arange(self.N) words = [self.features[i] for i in self.sorted_[:self.N]] freqs = {} # Set up the bar plots if self.conditional_freqdist_: for label, values in sorted(self.conditional_freqdist_.items(), key=itemgetter(0)): freqs[label] = [ values[i] for i in self.sorted_[:self.N] ] else: freqs['corpus'] = [ self.freqdist_[i] for i in self.sorted_[:self.N] ] # Draw a horizontal barplot if self.orient == 'h': # Add the barchart, stacking if necessary for label, freq in freqs.items(): self.ax.barh(bins, freq, label=label, align='center') # Set the y ticks to the words self.ax.set_yticks(bins) self.ax.set_yticklabels(words) # Order the features from top to bottom on the y axis self.ax.invert_yaxis() # Turn off y grid lines and turn on x grid lines self.ax.yaxis.grid(False) self.ax.xaxis.grid(True) # Draw a vertical barplot elif self.orient == 'v': # Add the barchart, stacking if necessary for label, freq in freqs.items(): self.ax.bar(bins, freq, label=label, align='edge') # Set the y ticks to the words self.ax.set_xticks(bins) self.ax.set_xticklabels(words, rotation=90) # Turn off x grid lines and turn on y grid lines self.ax.yaxis.grid(True) self.ax.xaxis.grid(False) # Unknown state else: raise YellowbrickValueError( "Orientation must be 'h' or 'v'" )
[ "def", "draw", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Prepare the data", "bins", "=", "np", ".", "arange", "(", "self", ".", "N", ")", "words", "=", "[", "self", ".", "features", "[", "i", "]", "for", "i", "in", "self", ".", "sorted_"...
32.064516
18.064516
def create_all(self, progress_callback: Optional[callable] = None) -> Dict[str, object]: """ Creates all the models discovered from fixture files in :attr:`fixtures_dir`. :param progress_callback: An optional function to track progress. It must take three parameters: - an :class:`Identifier` - the model instance - and a boolean specifying whether the model was created :return: A dictionary keyed by identifier where the values are model instances. """ if not self._loaded: self._load_data() # build up a directed acyclic graph to determine the model instantiation order dag = nx.DiGraph() for model_class_name, dependencies in self.relationships.items(): dag.add_node(model_class_name) for dep in dependencies: dag.add_edge(model_class_name, dep) try: creation_order = reversed(list(nx.topological_sort(dag))) except nx.NetworkXUnfeasible: raise Exception('Circular dependency detected between models: ' ', '.join(['{a} -> {b}'.format(a=a, b=b) for a, b in nx.find_cycle(dag)])) # create or update the models in the determined order rv = {} for model_class_name in creation_order: for identifier_key, data in self.model_fixtures[model_class_name].items(): identifier = Identifier(model_class_name, identifier_key) data = self.factory.maybe_convert_values(identifier, data) self._cache[identifier_key] = data model_instance, created = self.factory.create_or_update(identifier, data) if progress_callback: progress_callback(identifier, model_instance, created) rv[identifier_key] = model_instance self.factory.commit() return rv
[ "def", "create_all", "(", "self", ",", "progress_callback", ":", "Optional", "[", "callable", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "object", "]", ":", "if", "not", "self", ".", "_loaded", ":", "self", ".", "_load_data", "(", ")", "# ...
46.953488
25.697674
def concat(cls, variables, dim='concat_dim', positions=None, shortcut=False): """Specialized version of Variable.concat for IndexVariable objects. This exists because we want to avoid converting Index objects to NumPy arrays, if possible. """ if not isinstance(dim, str): dim, = dim.dims variables = list(variables) first_var = variables[0] if any(not isinstance(v, cls) for v in variables): raise TypeError('IndexVariable.concat requires that all input ' 'variables be IndexVariable objects') indexes = [v._data.array for v in variables] if not indexes: data = [] else: data = indexes[0].append(indexes[1:]) if positions is not None: indices = nputils.inverse_permutation( np.concatenate(positions)) data = data.take(indices) attrs = OrderedDict(first_var.attrs) if not shortcut: for var in variables: if var.dims != first_var.dims: raise ValueError('inconsistent dimensions') utils.remove_incompatible_items(attrs, var.attrs) return cls(first_var.dims, data, attrs)
[ "def", "concat", "(", "cls", ",", "variables", ",", "dim", "=", "'concat_dim'", ",", "positions", "=", "None", ",", "shortcut", "=", "False", ")", ":", "if", "not", "isinstance", "(", "dim", ",", "str", ")", ":", "dim", ",", "=", "dim", ".", "dims"...
34.324324
19
def make_xeditable(instance=None, extra_attrs=[], *args, **kwargs): """ Converts the contents of the column into an ``<a>`` tag with the required DOM attributes to power the X-Editable UI. The following keyword arguments are all optional, but may be provided when pre-calling the helper, to customize the output of the helper once it is run per object record: * ``type`` - Defaults to the basic type of the HTML input ("text", "number", "datetime") * ``title`` - Defaults to an empty string, controls the HTML "title" attribute. * ``placeholder`` - Defaults to whatever "title" is, controls the HTML "placeholder" attribute. * ``url`` - Defaults to the ``request.path`` of the view, which will automatically serve the X-Editable interface as long as it inherits from ``XEditableDatatableView``. * ``source`` - Defaults to the ``request.path`` of the view, which will automatically serve X-Editable requests for ``choices`` data about a field. Supplying a list of names via ``extra_attrs`` will enable arbitrary other keyword arguments to be rendered in the HTML as attribute as well. ``extra_attrs`` serves as a whitelist of extra names so that unintended kwargs don't get rendered without your permission. 
""" if instance is None: # Preloading kwargs into the helper for deferred execution helper = partial(make_xeditable, extra_attrs=extra_attrs, *args, **kwargs) return helper # Immediate finalization, return the xeditable structure data = kwargs.get('default_value', instance) rich_data = kwargs.get('rich_value', data) # Compile values to appear as "data-*" attributes on the anchor tag default_attr_names = ['pk', 'type', 'url', 'source', 'title', 'placeholder'] valid_attr_names = set(default_attr_names + list(extra_attrs)) attrs = {} for k, v in kwargs.items(): if k in valid_attr_names: if k.startswith('data_'): k = k[5:] attrs['data-{0}'.format(k)] = v attrs['data-xeditable'] = "xeditable" # Assign default values where they are not provided field_name = kwargs['field_name'] # sent as a default kwarg to helpers if isinstance(field_name, (tuple, list)): # Legacy syntax field_name = field_name[1] if isinstance(field_name, (tuple, list)): raise ValueError("'make_xeditable' helper needs a single-field data column," " not {0!r}".format(field_name)) attrs['data-name'] = field_name if isinstance(rich_data, Model): attrs['data-value'] = rich_data.pk else: attrs['data-value'] = rich_data if 'data-pk' not in attrs: attrs['data-pk'] = instance.pk if 'data-url' not in attrs: # Look for a backup data-url provider_name = 'get_update_url' url_provider = getattr(kwargs.get('view'), provider_name, None) if not url_provider: url_provider = getattr(instance, provider_name, None) if not url_provider and 'view' in kwargs: url_provider = lambda field_name: kwargs['view'].request.path else: raise ValueError("'make_xeditable' cannot determine a value for 'url'.") if url_provider: attrs['data-url'] = url_provider(field_name=field_name) if 'data-placeholder' not in attrs: attrs['data-placeholder'] = attrs.get('data-title', "") if 'data-type' not in attrs: if hasattr(instance, '_meta'): # Try to fetch a reasonable type from the field's class if field_name == 'pk': # special field name not in 
Model._meta.fields field = instance._meta.pk else: field = resolve_orm_path(instance, field_name) if field.choices: field_type = 'select' else: field_type = XEDITABLE_FIELD_TYPES.get(field.get_internal_type(), 'text') else: field_type = 'text' attrs['data-type'] = field_type # type=select elements need to fetch their valid choice options from an AJAX endpoint. # Register the view for this lookup. if attrs['data-type'] in ('select', 'select2'): if 'data-source' not in attrs: if 'view' in kwargs: attrs['data-source'] = "{url}?{field_param}={fieldname}".format(**{ 'url': kwargs['view'].request.path, 'field_param': kwargs['view'].xeditable_fieldname_param, 'fieldname': field_name, }) if attrs['data-type'] == 'select2': attrs['data-source'] += '&select2=true' else: raise ValueError("'make_xeditable' cannot determine a value for 'source'.") # Choice fields will want to display their readable label instead of db data data = getattr(instance, 'get_{0}_display'.format(field_name), lambda: data)() data = u"""<a href="#"{attrs}>{data}</a>""".format(attrs=flatatt(attrs), data=data) return data
[ "def", "make_xeditable", "(", "instance", "=", "None", ",", "extra_attrs", "=", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "instance", "is", "None", ":", "# Preloading kwargs into the helper for deferred execution", "helper", "=", "pa...
45.315315
26.297297
def _exec_config_str(self, lhs, rhs): """execute self.config.<lhs> = <rhs> * expands ~ with expanduser * tries to assign with raw eval, otherwise assigns with just the string, allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not* equivalent are `--C.a=4` and `--C.a='4'`. """ rhs = os.path.expanduser(rhs) try: # Try to see if regular Python syntax will work. This # won't handle strings as the quote marks are removed # by the system shell. value = eval(rhs) except (NameError, SyntaxError): # This case happens if the rhs is a string. value = rhs exec u'self.config.%s = value' % lhs
[ "def", "_exec_config_str", "(", "self", ",", "lhs", ",", "rhs", ")", ":", "rhs", "=", "os", ".", "path", ".", "expanduser", "(", "rhs", ")", "try", ":", "# Try to see if regular Python syntax will work. This", "# won't handle strings as the quote marks are removed", "...
39.421053
16
def current_time(self) -> datetime: """Extract current time.""" _date = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("date"), "%Y%m%d") _time = datetime.strptime(self.obj.SBRes.SBReq.StartT.get("time"), "%H:%M") return datetime.combine(_date.date(), _time.time())
[ "def", "current_time", "(", "self", ")", "->", "datetime", ":", "_date", "=", "datetime", ".", "strptime", "(", "self", ".", "obj", ".", "SBRes", ".", "SBReq", ".", "StartT", ".", "get", "(", "\"date\"", ")", ",", "\"%Y%m%d\"", ")", "_time", "=", "da...
59.2
22.2
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False, size_for_gist=MAX_URL_LEN): """ Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created. """ contents = make_geojson(contents) if len(contents) <= size_for_gist and not force_gist: url = data_url(contents, domain) else: gist = _make_gist(contents) url = gist_url(gist.id, domain) return url
[ "def", "make_url", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ",", "force_gist", "=", "False", ",", "size_for_gist", "=", "MAX_URL_LEN", ")", ":", "contents", "=", "make_geojson", "(", "contents", ")", "if", "len", "(", "contents", ")", "<=", "...
33.176471
19.588235
def get_choices(module_name): """ Retrieve members from ``module_name``'s ``__all__`` list. :rtype: list """ try: module = importlib.import_module(module_name) if hasattr(module, '__all__'): return module.__all__ else: return [name for name, _ in inspect.getmembers(module, inspect.isclass) if name != "device"] except ImportError: return []
[ "def", "get_choices", "(", "module_name", ")", ":", "try", ":", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "if", "hasattr", "(", "module", ",", "'__all__'", ")", ":", "return", "module", ".", "__all__", "else", ":", "return"...
28.533333
17.333333
def get_repo_data(saltenv='base'): ''' Returns the existing package metadata db. Will create it, if it does not exist, however will not refresh it. Args: saltenv (str): Salt environment. Default ``base`` Returns: dict: A dict containing contents of metadata db. CLI Example: .. code-block:: bash salt '*' pkg.get_repo_data ''' # we only call refresh_db if it does not exist, as we want to return # the existing data even if its old, other parts of the code call this, # but they will call refresh if they need too. repo_details = _get_repo_details(saltenv) if repo_details.winrepo_age == -1: # no repo meta db log.debug('No winrepo.p cache file. Refresh pkg db now.') refresh_db(saltenv=saltenv) if 'winrepo.data' in __context__: log.trace('get_repo_data returning results from __context__') return __context__['winrepo.data'] else: log.trace('get_repo_data called reading from disk') try: serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(repo_details.winrepo_file, 'rb') as repofile: try: repodata = salt.utils.data.decode(serial.loads(repofile.read()) or {}) __context__['winrepo.data'] = repodata return repodata except Exception as exc: log.exception(exc) return {} except IOError as exc: log.error('Not able to read repo file') log.exception(exc) return {}
[ "def", "get_repo_data", "(", "saltenv", "=", "'base'", ")", ":", "# we only call refresh_db if it does not exist, as we want to return", "# the existing data even if its old, other parts of the code call this,", "# but they will call refresh if they need too.", "repo_details", "=", "_get_r...
32.446809
22.021277
def _error_repr(error): """A compact unique representation of an error.""" error_repr = repr(error) if len(error_repr) > 200: error_repr = hash(type(error)) return error_repr
[ "def", "_error_repr", "(", "error", ")", ":", "error_repr", "=", "repr", "(", "error", ")", "if", "len", "(", "error_repr", ")", ">", "200", ":", "error_repr", "=", "hash", "(", "type", "(", "error", ")", ")", "return", "error_repr" ]
32.166667
10.166667
def _mitogen_reset(self, mode): """ Forget everything we know about the connected context. This function cannot be called _reset() since that name is used as a public API by Ansible 2.4 wait_for_connection plug-in. :param str mode: Name of ContextService method to use to discard the context, either 'put' or 'reset'. """ if not self.context: return self.chain.reset() self.parent.call_service( service_name='ansible_mitogen.services.ContextService', method_name=mode, context=self.context ) self.context = None self.login_context = None self.init_child_result = None self.chain = None
[ "def", "_mitogen_reset", "(", "self", ",", "mode", ")", ":", "if", "not", "self", ".", "context", ":", "return", "self", ".", "chain", ".", "reset", "(", ")", "self", ".", "parent", ".", "call_service", "(", "service_name", "=", "'ansible_mitogen.services....
31.208333
18.541667
def _next_pattern(self): """Parses the next pattern by matching each in turn.""" current_state = self.state_stack[-1] position = self._position for pattern in self.patterns: if current_state not in pattern.states: continue m = pattern.regex.match(self.source, position) if not m: continue position = m.end() token = None if pattern.next_state: self.state_stack.append(pattern.next_state) if pattern.action: callback = getattr(self, pattern.action, None) if callback is None: raise RuntimeError( "No method defined for pattern action %s!" % pattern.action) if "token" in m.groups(): value = m.group("token") else: value = m.group(0) token = callback(string=value, match=m, pattern=pattern) self._position = position return token self._error("Don't know how to match next. Did you forget quotes?", start=self._position, end=self._position + 1)
[ "def", "_next_pattern", "(", "self", ")", ":", "current_state", "=", "self", ".", "state_stack", "[", "-", "1", "]", "position", "=", "self", ".", "_position", "for", "pattern", "in", "self", ".", "patterns", ":", "if", "current_state", "not", "in", "pat...
32.868421
17.789474
def notebook_system_output(): """Get a context manager that attempts to use `wurlitzer <https://github.com/minrk/wurlitzer>`__ to capture system-level stdout/stderr within a Jupyter Notebook shell, without affecting normal operation when run as a Python script. For example: >>> sys_pipes = sporco.util.notebook_system_output() >>> with sys_pipes(): >>> command_producing_system_level_output() Returns ------- sys_pipes : context manager Context manager that handles output redirection when run within a Jupyter Notebook shell """ from contextlib import contextmanager @contextmanager def null_context_manager(): yield if in_notebook(): try: from wurlitzer import sys_pipes except ImportError: sys_pipes = null_context_manager else: sys_pipes = null_context_manager return sys_pipes
[ "def", "notebook_system_output", "(", ")", ":", "from", "contextlib", "import", "contextmanager", "@", "contextmanager", "def", "null_context_manager", "(", ")", ":", "yield", "if", "in_notebook", "(", ")", ":", "try", ":", "from", "wurlitzer", "import", "sys_pi...
28
20.46875
def output_after_run(self, run): """ The method output_after_run() prints filename, result, time and status of a run to terminal and stores all data in XML """ # format times, type is changed from float to string! cputime_str = util.format_number(run.cputime, TIME_PRECISION) walltime_str = util.format_number(run.walltime, TIME_PRECISION) # format numbers, number_of_digits is optional, so it can be None for column in run.columns: if column.number_of_digits is not None: # if the number ends with "s" or another letter, remove it if (not column.value.isdigit()) and column.value[-2:-1].isdigit(): column.value = column.value[:-1] try: floatValue = float(column.value) column.value = util.format_number(floatValue, column.number_of_digits) except ValueError: # if value is no float, don't format it pass # store information in run run.resultline = self.create_output_line(run.runSet, run.identifier, run.status, cputime_str, walltime_str, run.values.get('host'), run.columns) self.add_values_to_run_xml(run) # output in terminal/console statusStr = COLOR_DIC[run.category].format(run.status.ljust(LEN_OF_STATUS)) try: OutputHandler.print_lock.acquire() valueStr = statusStr + cputime_str.rjust(8) + walltime_str.rjust(8) if self.benchmark.num_of_threads == 1: util.printOut(valueStr) else: timeStr = time.strftime("%H:%M:%S", time.localtime()) + " "*14 util.printOut(timeStr + self.format_sourcefile_name(run.identifier, run.runSet) + valueStr) # write result in txt_file and XML self.txt_file.append(self.run_set_to_text(run.runSet), False) self.statistics.add_result(run) # we don't want to write this file to often, it can slow down the whole script, # so we wait at least 10 seconds between two write-actions currentTime = util.read_monotonic_time() if currentTime - run.runSet.xml_file_last_modified_time > 60: self._write_rough_result_xml_to_file(run.runSet.xml, run.runSet.xml_file_name) run.runSet.xml_file_last_modified_time = util.read_monotonic_time() finally: 
OutputHandler.print_lock.release() if self.compress_results: log_file_path = os.path.relpath(run.log_file, os.path.join(self.benchmark.log_folder, os.pardir)) with self.log_zip_lock: self.log_zip.write(run.log_file, log_file_path) os.remove(run.log_file) else: self.all_created_files.add(run.log_file) if os.path.isdir(run.result_files_folder): self.all_created_files.add(run.result_files_folder)
[ "def", "output_after_run", "(", "self", ",", "run", ")", ":", "# format times, type is changed from float to string!", "cputime_str", "=", "util", ".", "format_number", "(", "run", ".", "cputime", ",", "TIME_PRECISION", ")", "walltime_str", "=", "util", ".", "format...
44.119403
26.268657
def variant_support(variants, allele_support_df, ignore_missing=False): ''' Collect the read evidence support for the given variants. Parameters ---------- variants : iterable of varcode.Variant allele_support_df : dataframe Allele support dataframe, as output by the varlens-allele-support tool. It should have columns: source, contig, interbase_start, interbase_end, allele. The remaining columns are interpreted as read counts of various subsets of reads (e.g. all reads, non-duplicate reads, etc.) ignore_missing : boolean If True, then varaints with no allele counts will be interpreted as having 0 depth. If False, then an exception will be raised if any variants have no allele counts. Returns ---------- A pandas.Panel4D frame with these axes: labels (axis=0) : the type of read being counted, i.e. the read count fields in allele_support_df. items (axis=1) : the type of measurement (num_alt, num_ref, num_other, total_depth, alt_fraction, any_alt_fraction) major axis (axis=2) : the variants minor axis (axis=3) : the sources ''' missing = [ c for c in EXPECTED_COLUMNS if c not in allele_support_df.columns ] if missing: raise ValueError("Missing columns: %s" % " ".join(missing)) # Ensure our start and end fields are ints. allele_support_df[["interbase_start", "interbase_end"]] = ( allele_support_df[["interbase_start", "interbase_end"]].astype(int)) sources = sorted(allele_support_df["source"].unique()) allele_support_dict = collections.defaultdict(dict) for (i, row) in allele_support_df.iterrows(): key = ( row['source'], row.contig, row.interbase_start, row.interbase_end) allele_support_dict[key][row.allele] = row["count"] # We want an exception on bad lookups, so convert to a regular dict. 
allele_support_dict = dict(allele_support_dict) dataframe_dicts = collections.defaultdict( lambda: collections.defaultdict(list)) for variant in variants: for source in sources: key = (source, variant.contig, variant.start - 1, variant.end) try: alleles = allele_support_dict[key] except KeyError: message = ( "No allele counts in source %s for variant %s" % ( source, str(variant))) if ignore_missing: logging.warning(message) alleles = {} else: raise ValueError(message) alt = alleles.get(variant.alt, 0) ref = alleles.get(variant.ref, 0) total = sum(alleles.values()) other = total - alt - ref dataframe_dicts["num_alt"][source].append(alt) dataframe_dicts["num_ref"][source].append(ref) dataframe_dicts["num_other"][source].append(other) dataframe_dicts["total_depth"][source].append(total) dataframe_dicts["alt_fraction"][source].append( float(alt) / max(1, total)) dataframe_dicts["any_alt_fraction"][source].append( float(alt + other) / max(1, total)) dataframes = dict( (label, pandas.DataFrame(value, index=variants)) for (label, value) in dataframe_dicts.items()) return pandas.Panel(dataframes)
[ "def", "variant_support", "(", "variants", ",", "allele_support_df", ",", "ignore_missing", "=", "False", ")", ":", "missing", "=", "[", "c", "for", "c", "in", "EXPECTED_COLUMNS", "if", "c", "not", "in", "allele_support_df", ".", "columns", "]", "if", "missi...
35.14433
22.587629
def get_template(template_file='', **kwargs): """Get the Jinja2 template and renders with dict _kwargs_. Args: template_file (str): name of the template file kwargs: Keywords to use for rendering the Jinja2 template. Returns: String of rendered JSON template. """ template = get_template_object(template_file) LOG.info('Rendering template %s', template.filename) for key, value in kwargs.items(): LOG.debug('%s => %s', key, value) rendered_json = template.render(**kwargs) LOG.debug('Rendered JSON:\n%s', rendered_json) return rendered_json
[ "def", "get_template", "(", "template_file", "=", "''", ",", "*", "*", "kwargs", ")", ":", "template", "=", "get_template_object", "(", "template_file", ")", "LOG", ".", "info", "(", "'Rendering template %s'", ",", "template", ".", "filename", ")", "for", "k...
28.47619
19.285714
def commutes( m1: np.ndarray, m2: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> bool: """Determines if two matrices approximately commute. Two matrices A and B commute if they are square and have the same size and AB = BA. Args: m1: One of the matrices. m2: The other matrix. rtol: The per-matrix-entry relative tolerance on equality. atol: The per-matrix-entry absolute tolerance on equality. Returns: Whether the two matrices have compatible sizes and a commutator equal to zero within tolerance. """ return (m1.shape[0] == m1.shape[1] and m1.shape == m2.shape and np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol))
[ "def", "commutes", "(", "m1", ":", "np", ".", "ndarray", ",", "m2", ":", "np", ".", "ndarray", ",", "*", ",", "rtol", ":", "float", "=", "1e-5", ",", "atol", ":", "float", "=", "1e-8", ")", "->", "bool", ":", "return", "(", "m1", ".", "shape", ...
31.458333
21
def create_new_csv(samples, args): """create csv file that can be use with bcbio -w template""" out_fn = os.path.splitext(args.csv)[0] + "-merged.csv" logger.info("Preparing new csv: %s" % out_fn) with file_transaction(out_fn) as tx_out: with open(tx_out, 'w') as handle: handle.write(_header(args.csv)) for s in samples: sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file']) handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
[ "def", "create_new_csv", "(", "samples", ",", "args", ")", ":", "out_fn", "=", "os", ".", "path", ".", "splitext", "(", "args", ".", "csv", ")", "[", "0", "]", "+", "\"-merged.csv\"", "logger", ".", "info", "(", "\"Preparing new csv: %s\"", "%", "out_fn"...
56.3
17.3
def store_hash_configuration(self, lshash): """ Stores hash configuration """ self.redis_object.set(lshash.hash_name+'_conf', pickle.dumps(lshash.get_config()))
[ "def", "store_hash_configuration", "(", "self", ",", "lshash", ")", ":", "self", ".", "redis_object", ".", "set", "(", "lshash", ".", "hash_name", "+", "'_conf'", ",", "pickle", ".", "dumps", "(", "lshash", ".", "get_config", "(", ")", ")", ")" ]
37.6
12
def args(self) -> str: """Provides arguments for the command.""" return '{}{}{}{}{}{}{}{}{}{}{}'.format( to_ascii_hex(self._index, 2), to_ascii_hex(self._group_number, 2), to_ascii_hex(self._unit_number, 2), to_ascii_hex(int(self._enable_status), 4), to_ascii_hex(int(self._switches), 4), to_ascii_hex(self._current_status, 2), to_ascii_hex(self._down_count, 2), to_ascii_hex(encode_value_using_ma(self._message_attribute, self._current_reading), 2), to_ascii_hex(encode_value_using_ma(self._message_attribute, self._high_limit), 2), to_ascii_hex(encode_value_using_ma(self._message_attribute, self._low_limit), 2), to_ascii_hex(int(self._special_status), 2))
[ "def", "args", "(", "self", ")", "->", "str", ":", "return", "'{}{}{}{}{}{}{}{}{}{}{}'", ".", "format", "(", "to_ascii_hex", "(", "self", ".", "_index", ",", "2", ")", ",", "to_ascii_hex", "(", "self", ".", "_group_number", ",", "2", ")", ",", "to_ascii_...
56.714286
18.642857
def clinvar_submission_lines(submission_objs, submission_header): """Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header Args: submission_objs(list): a list of objects (variants or casedata) to include in a csv file submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only Returns: submission_lines(list) a list of strings, each string represents a line of the clinvar csv file to be doenloaded """ submission_lines = [] for submission_obj in submission_objs: # Loop over the submission objects. Each of these is a line csv_line = [] for header_key, header_value in submission_header.items(): # header_keys are the same keys as in submission_objs if header_key in submission_obj: # The field is filled in for this variant/casedata object csv_line.append('"'+submission_obj.get(header_key)+'"') else: # Empty field for this this variant/casedata object csv_line.append('""') submission_lines.append(','.join(csv_line)) return submission_lines
[ "def", "clinvar_submission_lines", "(", "submission_objs", ",", "submission_header", ")", ":", "submission_lines", "=", "[", "]", "for", "submission_obj", "in", "submission_objs", ":", "# Loop over the submission objects. Each of these is a line", "csv_line", "=", "[", "]",...
54.5
35.636364
def main(argv: Optional[Sequence[str]] = None) -> None: """Parse arguments and process the exam assignment.""" parser = ArgumentParser(description="Convert Jupyter Notebook exams to PDFs") parser.add_argument( "--exam", type=int, required=True, help="Exam number to convert", dest="exam_num", ) parser.add_argument( "--time", type=str, required=True, help="Time of exam to convert" ) parser.add_argument( "--date", type=str, required=True, help="The date the exam will take place" ) args = parser.parse_args(argv) process(args.exam_num, args.time, args.date)
[ "def", "main", "(", "argv", ":", "Optional", "[", "Sequence", "[", "str", "]", "]", "=", "None", ")", "->", "None", ":", "parser", "=", "ArgumentParser", "(", "description", "=", "\"Convert Jupyter Notebook exams to PDFs\"", ")", "parser", ".", "add_argument",...
35.388889
21.166667
def create_waf(self, name, waf_type): """ Creates a WAF with the given type. :param name: Name of the WAF. :param waf_type: WAF type. ('mod_security', 'Snort', 'Imperva SecureSphere', 'F5 BigIP ASM', 'DenyAll rWeb') """ params = { 'name': name, 'type': waf_type } return self._request('POST', 'rest/wafs/new', params)
[ "def", "create_waf", "(", "self", ",", "name", ",", "waf_type", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'type'", ":", "waf_type", "}", "return", "self", ".", "_request", "(", "'POST'", ",", "'rest/wafs/new'", ",", "params", ")" ]
35.909091
16.818182
def hms(self, msg, tic=None, prt=sys.stdout): """Print elapsed time and message.""" if tic is None: tic = self.tic now = timeit.default_timer() hms = str(datetime.timedelta(seconds=(now-tic))) prt.write('{HMS}: {MSG}\n'.format(HMS=hms, MSG=msg)) return now
[ "def", "hms", "(", "self", ",", "msg", ",", "tic", "=", "None", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "if", "tic", "is", "None", ":", "tic", "=", "self", ".", "tic", "now", "=", "timeit", ".", "default_timer", "(", ")", "hms", "=", ...
38.625
12.25
def tree_token_generator(el, indentation_level=0): """ Internal generator that yields tokens for the given HTML element as follows: - A tuple (LXML element, BEGIN, indentation_level) - Text right after the start of the tag, or None. - Recursively calls the token generator for all child objects - A tuple (LXML element, END, indentation_level) - Text right after the end of the tag, or None. """ if not isinstance(el.tag, string_class): return tag_name = el.tag.lower() is_indentation = is_indentation_element(el) if is_indentation: indentation_level += 1 yield (el, BEGIN, indentation_level) yield el.text for child in el.iterchildren(): for token in tree_token_generator(child, indentation_level): yield token if is_indentation: indentation_level -= 1 yield (el, END, indentation_level) yield el.tail
[ "def", "tree_token_generator", "(", "el", ",", "indentation_level", "=", "0", ")", ":", "if", "not", "isinstance", "(", "el", ".", "tag", ",", "string_class", ")", ":", "return", "tag_name", "=", "el", ".", "tag", ".", "lower", "(", ")", "is_indentation"...
24.944444
21.833333
def south_field_triple(self): "Returns a suitable description of this field for South." from south.modelsinspector import introspector field_class = "django.db.models.fields.CharField" args, kwargs = introspector(self) return (field_class, args, kwargs)
[ "def", "south_field_triple", "(", "self", ")", ":", "from", "south", ".", "modelsinspector", "import", "introspector", "field_class", "=", "\"django.db.models.fields.CharField\"", "args", ",", "kwargs", "=", "introspector", "(", "self", ")", "return", "(", "field_cl...
48
11.666667
def allByAge(self, cascadeFetch=False): ''' allByAge - Get the underlying objects which match the filter criteria, ordered oldest -> newest If you are doing a queue or just need the head/tail, consider .first() and .last() instead. @param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model will be fetched immediately. If False, foreign objects will be fetched on-access. @return - Objects of the Model instance associated with this query, sorted oldest->newest ''' matchedKeys = self.getPrimaryKeys(sortByAge=True) if matchedKeys: return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch) return IRQueryableList([], mdl=self.mdl)
[ "def", "allByAge", "(", "self", ",", "cascadeFetch", "=", "False", ")", ":", "matchedKeys", "=", "self", ".", "getPrimaryKeys", "(", "sortByAge", "=", "True", ")", "if", "matchedKeys", ":", "return", "self", ".", "getMultiple", "(", "matchedKeys", ",", "ca...
43.5625
35.3125
def getPrinted(self): """ returns "0", "1" or "2" to indicate Printed state. 0 -> Never printed. 1 -> Printed after last publish 2 -> Printed but republished afterwards. """ workflow = getToolByName(self, 'portal_workflow') review_state = workflow.getInfoFor(self, 'review_state', '') if review_state not in ['published']: return "0" report_list = sorted(self.objectValues('ARReport'), key=lambda report: report.getDatePublished()) if not report_list: return "0" last_report = report_list[-1] if last_report.getDatePrinted(): return "1" else: for report in report_list: if report.getDatePrinted(): return "2" return "0"
[ "def", "getPrinted", "(", "self", ")", ":", "workflow", "=", "getToolByName", "(", "self", ",", "'portal_workflow'", ")", "review_state", "=", "workflow", ".", "getInfoFor", "(", "self", ",", "'review_state'", ",", "''", ")", "if", "review_state", "not", "in...
37.954545
12.727273
def do_build(self, argv):
    """\
    build [TARGETS]         Build the specified TARGETS and their
                            dependencies.  'b' is a synonym.
    """
    import SCons.Node
    import SCons.SConsign
    import SCons.Script.Main

    # Work on a copy so repeated builds in the same session don't
    # accumulate option state.
    options = copy.deepcopy(self.options)
    options, targets = self.parser.parse_args(argv[1:], values=options)
    SCons.Script.COMMAND_LINE_TARGETS = targets

    if targets:
        SCons.Script.BUILD_TARGETS = targets
    else:
        # If the user didn't specify any targets on the command line,
        # use the list of default targets.
        SCons.Script.BUILD_TARGETS = SCons.Script._build_plus_default

    nodes = SCons.Script.Main._build_targets(self.fs,
                                             options,
                                             targets,
                                             self.target_top)

    if not nodes:
        return

    # Call each of the Node's alter_targets() methods, which may
    # provide additional targets that ended up as part of the build
    # (the canonical example being a VariantDir() when we're building
    # from a source directory) and which we therefore need their
    # state cleared, too.
    x = []
    for n in nodes:
        x.extend(n.alter_targets()[0])
    nodes.extend(x)

    # Clean up so that we can perform the next build correctly.
    #
    # We do this by walking over all the children of the targets,
    # and clearing their state.
    #
    # We currently have to re-scan each node to find their
    # children, because built nodes have already been partially
    # cleared and don't remember their children.  (In scons
    # 0.96.1 and earlier, this wasn't the case, and we didn't
    # have to re-scan the nodes.)
    #
    # Because we have to re-scan each node, we can't clear the
    # nodes as we walk over them, because we may end up rescanning
    # a cleared node as we scan a later node.  Therefore, only
    # store the list of nodes that need to be cleared as we walk
    # the tree, and clear them in a separate pass.
    #
    # XXX: Someone more familiar with the inner workings of scons
    # may be able to point out a more efficient way to do this.

    SCons.Script.Main.progress_display("scons: Clearing cached node information ...")

    seen_nodes = {}

    def get_unseen_children(node, parent, seen_nodes=seen_nodes):
        # Walker kids_func: only descend into children we haven't
        # already recorded.
        def is_unseen(node, seen_nodes=seen_nodes):
            return node not in seen_nodes
        return [child for child in node.children(scan=1) if is_unseen(child)]

    def add_to_seen_nodes(node, parent, seen_nodes=seen_nodes):
        # Walker eval_func: record every visited node for the clearing
        # pass below.
        seen_nodes[node] = 1

        # If this file is in a VariantDir and has a
        # corresponding source file in the source tree, remember the
        # node in the source tree, too.  This is needed in
        # particular to clear cached implicit dependencies on the
        # source file, since the scanner will scan it if the
        # VariantDir was created with duplicate=0.
        try:
            rfile_method = node.rfile
        except AttributeError:
            return
        else:
            rfile = rfile_method()
        if rfile != node:
            seen_nodes[rfile] = 1

    for node in nodes:
        walker = SCons.Node.Walker(node,
                                   kids_func=get_unseen_children,
                                   eval_func=add_to_seen_nodes)
        # Drive the walker to exhaustion; the side effect (populating
        # seen_nodes) happens in the callbacks above.
        n = walker.get_next()
        while n:
            n = walker.get_next()

    for node in list(seen_nodes.keys()):
        # Call node.clear() to clear most of the state
        node.clear()
        # node.clear() doesn't reset node.state, so call
        # node.set_state() to reset it manually
        node.set_state(SCons.Node.no_state)
        node.implicit = None

        # Debug:  Uncomment to verify that all Taskmaster reference
        # counts have been reset to zero.
        #if node.ref_count != 0:
        #    from SCons.Debug import Trace
        #    Trace('node %s, ref_count %s !!!\n' % (node, node.ref_count))

    SCons.SConsign.Reset()
    SCons.Script.Main.progress_display("scons: done clearing node information.")
[ "def", "do_build", "(", "self", ",", "argv", ")", ":", "import", "SCons", ".", "Node", "import", "SCons", ".", "SConsign", "import", "SCons", ".", "Script", ".", "Main", "options", "=", "copy", ".", "deepcopy", "(", "self", ".", "options", ")", "option...
39.927928
22.333333
def _define_jco_args(cmd_parser): """ Define job configuration arguments. Returns groups defined, currently one. """ jo_group = cmd_parser.add_argument_group('Job options', 'Job configuration options') jo_group.add_argument('--job-name', help='Job name') jo_group.add_argument('--preload', action='store_true', help='Preload job onto all resources in the instance') jo_group.add_argument('--trace', choices=['error', 'warn', 'info', 'debug', 'trace'], help='Application trace level') jo_group.add_argument('--submission-parameters', '-p', nargs='+', action=_SubmitParamArg, help="Submission parameters as name=value pairs") jo_group.add_argument('--job-config-overlays', help="Path to file containing job configuration overlays JSON. Overrides any job configuration set by the application." , metavar='file') return jo_group,
[ "def", "_define_jco_args", "(", "cmd_parser", ")", ":", "jo_group", "=", "cmd_parser", ".", "add_argument_group", "(", "'Job options'", ",", "'Job configuration options'", ")", "jo_group", ".", "add_argument", "(", "'--job-name'", ",", "help", "=", "'Job name'", ")"...
53.625
41.25
def build_dependencies(self): """ Build the dependencies for this module. Parse the code with ast, find all the import statements, convert them into Dependency objects. """ highest = self.dsm or self.root if self is highest: highest = LeafNode() for _import in self.parse_code(): target = highest.get_target(_import['target']) if target: what = _import['target'].split('.')[-1] if what != target.name: _import['what'] = what _import['target'] = target self.dependencies.append(Dependency(source=self, **_import))
[ "def", "build_dependencies", "(", "self", ")", ":", "highest", "=", "self", ".", "dsm", "or", "self", ".", "root", "if", "self", "is", "highest", ":", "highest", "=", "LeafNode", "(", ")", "for", "_import", "in", "self", ".", "parse_code", "(", ")", ...
37.555556
11.333333
def get_fixed_argv(self): # pragma: no cover """Get proper arguments for re-running the command. This is primarily for fixing some issues under Windows. First, there was a bug in Windows when running an executable located at a path with a space in it. This has become a non-issue with current versions of Python and Windows, so we don't take measures like adding quotes or calling win32api.GetShortPathName() as was necessary in former times. Second, depending on whether gearbox was installed as an egg or a wheel under Windows, it is run as a .py or an .exe stub. In the first case, we need to run it through the interpreter. On other operating systems, we can re-run the command as is. """ argv = sys.argv[:] if sys.platform == 'win32' and argv[0].endswith('.py'): argv.insert(0, sys.executable) return argv
[ "def", "get_fixed_argv", "(", "self", ")", ":", "# pragma: no cover", "argv", "=", "sys", ".", "argv", "[", ":", "]", "if", "sys", ".", "platform", "==", "'win32'", "and", "argv", "[", "0", "]", ".", "endswith", "(", "'.py'", ")", ":", "argv", ".", ...
44.142857
23.190476
def copy_package(owner, repo, identifier, destination): """Copy a package to another repository.""" client = get_packages_api() with catch_raise_api_exception(): data, _, headers = client.packages_copy_with_http_info( owner=owner, repo=repo, identifier=identifier, data={"destination": destination}, ) ratelimits.maybe_rate_limit(client, headers) return data.slug_perm, data.slug
[ "def", "copy_package", "(", "owner", ",", "repo", ",", "identifier", ",", "destination", ")", ":", "client", "=", "get_packages_api", "(", ")", "with", "catch_raise_api_exception", "(", ")", ":", "data", ",", "_", ",", "headers", "=", "client", ".", "packa...
32.285714
15.642857