text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def calc_schm_wats_v1(self):
    """Calculate the actual amount of water melting within the snow cover.

    Required control parameters:
      |NHRU|
      |Lnk|

    Required flux sequences:
      |SBes|
      |WGTF|

    Calculated flux sequence:
      |Schm|

    Updated state sequence:
      |WATS|

    Basic equations:
      :math:`\\frac{dWATS}{dt} = SBes - Schm`

      :math:`Schm = \\Bigl \\lbrace
      { {WGTF \\ | \\ WATS > 0} \\atop {0 \\ | \\ WATS = 0} }`

    Examples:

        Initialize two water (|FLUSS| and |SEE|) and four arable land
        (|ACKER|) HRUs.  Assume the same values for the initial amount of
        frozen water (|WATS|) and the frozen part of stand precipitation
        (|SBes|), but different values for potential snowmelt (|WGTF|):

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(6)
        >>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER)
        >>> states.wats = 2.0
        >>> fluxes.sbes = 1.0
        >>> fluxes.wgtf = 1.0, 1.0, 0.0, 1.0, 3.0, 5.0
        >>> model.calc_schm_wats_v1()
        >>> states.wats
        wats(0.0, 0.0, 3.0, 2.0, 0.0, 0.0)
        >>> fluxes.schm
        schm(0.0, 0.0, 0.0, 1.0, 3.0, 3.0)

        For the water areas, both the frozen amount of water and actual melt
        are set to zero.  For all other land use classes, actual melt
        is either limited by potential melt or the available frozen water,
        which is the sum of initial frozen water and the frozen part
        of stand precipitation.
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nhru):
        if con.lnk[k] in (WASSER, FLUSS, SEE):
            # Water areas store no frozen water and produce no melt.
            sta.wats[k] = 0.
            flu.schm[k] = 0.
        else:
            # Add the frozen part of stand precipitation first, then melt
            # as much of the stored frozen water as potential melt allows.
            sta.wats[k] += flu.sbes[k]
            flu.schm[k] = min(flu.wgtf[k], sta.wats[k])
            sta.wats[k] -= flu.schm[k]
[ "def", "calc_schm_wats_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "sta", "=", "self", ".", "sequences", ".", "states", ".",...
30.571429
19.730159
def reloadFileOfCurrentItem(self, rtiRegItem=None):
    """ Finds the repo tree item that holds the file of the current item and reloads it.

        Reloading is done by removing the repo tree item and inserting a new
        one.  The new item will be of type rtiRegItem.cls.  If rtiRegItem is
        None (the default), the new rtiClass will be the same as the old one.
        The rtiRegItem.cls will be imported. If this fails the old class will
        be used, and a warning will be logged.

        :param rtiRegItem: registry item whose class is used for the new tree
            item, or None to reuse the old class.
        :return: the model index of the newly selected item, or None when
            there is no valid current index.
    """
    logger.debug("reloadFileOfCurrentItem, rtiClass={}".format(rtiRegItem))

    currentIndex = self.getRowCurrentIndex()
    if not currentIndex.isValid():
        return

    currentItem, _ = self.getCurrentItem()
    oldPath = currentItem.nodePath

    fileRtiIndex = self.model().findFileRtiIndex(currentIndex)
    isExpanded = self.isExpanded(fileRtiIndex)

    if rtiRegItem is None:
        rtiClass = None
    else:
        rtiRegItem.tryImportClass()
        rtiClass = rtiRegItem.cls

    newRtiIndex = self.model().reloadFileAtIndex(fileRtiIndex, rtiClass=rtiClass)

    try:
        # Expand and select the name with the old path
        _lastItem, lastIndex = self.expandPath(oldPath)
        self.setCurrentIndex(lastIndex)
        return lastIndex
    except Exception as ex:
        # The old path may not exist anymore. In that case select file RTI.
        # (Fixed typo in the log message: 'beause' -> 'because'.)
        logger.warning("Unable to select {!r} because of: {}".format(oldPath, ex))
        self.setExpanded(newRtiIndex, isExpanded)
        self.setCurrentIndex(newRtiIndex)
        return newRtiIndex
[ "def", "reloadFileOfCurrentItem", "(", "self", ",", "rtiRegItem", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"reloadFileOfCurrentItem, rtiClass={}\"", ".", "format", "(", "rtiRegItem", ")", ")", "currentIndex", "=", "self", ".", "getRowCurrentIndex", "(...
40.609756
22.682927
def _visit_content(item, parent, visitor):
    """ Recursively visit nodes in the project tree.
    :param item: LocalContent/LocalFolder/LocalFile we are traversing down from
    :param parent: LocalContent/LocalFolder parent or None
    :param visitor: object visiting the tree
    """
    # Dispatch on the kind of node we are looking at.
    if KindType.is_project(item):
        visitor.visit_project(item)
    elif KindType.is_folder(item):
        visitor.visit_folder(item, parent)
    else:
        visitor.visit_file(item, parent)

    # Files are leaves; anything else may contain children to recurse into.
    if KindType.is_file(item):
        return
    for child in item.children:
        ProjectWalker._visit_content(child, item, visitor)
[ "def", "_visit_content", "(", "item", ",", "parent", ",", "visitor", ")", ":", "if", "KindType", ".", "is_project", "(", "item", ")", ":", "visitor", ".", "visit_project", "(", "item", ")", "elif", "KindType", ".", "is_folder", "(", "item", ")", ":", "...
41.8125
9.9375
def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
    """Creates directories and packages.

    Returns a truthy value when at least one directory or package
    was actually created.
    """
    created = self.__mkdir(top)
    # Source tree: every entry becomes a Python package.
    for package_dir in (src, models, ctrls, views, utils):
        created = self.__mkpkg(package_dir) or created
    # Resource tree: plain directories, including the nested UI folders.
    for resource_dir in (
            resources,
            os.path.join(resources, "ui", "builder"),
            os.path.join(resources, "ui", "styles"),
            os.path.join(resources, "external")):
        created = self.__mkdir(resource_dir) or created
    return created
[ "def", "__generate_tree", "(", "self", ",", "top", ",", "src", ",", "resources", ",", "models", ",", "ctrls", ",", "views", ",", "utils", ")", ":", "res", "=", "self", ".", "__mkdir", "(", "top", ")", "for", "fn", "in", "(", "src", ",", "models", ...
57.444444
23.666667
def gisland(self, dae):
    """Reset g(x) for islanded buses and areas.

    Zeroes the mismatch equations of every islanded bus (both the angle
    rows and the voltage rows, which are offset by the bus count) so the
    residuals ignore de-energized islands.

    :param dae: DAE container whose ``g`` vector is modified in place.
    :return: None
    """
    if (not self.islanded_buses) and (not self.island_sets):
        return

    # (Removed dead code: ``a, v = list(), list()`` was unconditionally
    # overwritten below and never read.)

    # TODO: fix for islanded sets without sw
    # for island in self.island_sets:
    #     nosw = 1
    #     for item in self.system.SW.bus:
    #         if self.uid[item] in island:
    #             nosw = 0
    #             break
    #     if nosw:
    #         self.islanded_buses += island
    #         self.island_sets.remove(island)

    a = self.islanded_buses
    # Voltage equations live at an offset of ``self.n`` rows.
    v = [self.n + item for item in a]
    dae.g[a] = 0
    dae.g[v] = 0
[ "def", "gisland", "(", "self", ",", "dae", ")", ":", "if", "(", "not", "self", ".", "islanded_buses", ")", "and", "(", "not", "self", ".", "island_sets", ")", ":", "return", "a", ",", "v", "=", "list", "(", ")", ",", "list", "(", ")", "# for isla...
31.478261
15.086957
def plot(self, x, y, **kw):
    """Plot *x* against *y*, erasing any previous plot.

    All keyword arguments are forwarded unchanged; see ``PlotPanel.plot``
    for the available method options.
    """
    return self.frame.plot(x, y, **kw)
[ "def", "plot", "(", "self", ",", "x", ",", "y", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "frame", ".", "plot", "(", "x", ",", "y", ",", "*", "*", "kw", ")" ]
34.4
3.8
def getOrCreate(cls, sc):
    """
    Get the existing SQLContext or create a new one with given SparkContext.

    :param sc: SparkContext
    :return: the singleton SQLContext instance
    """
    if cls._instantiatedContext is None:
        # Reuse (or lazily create) the JVM-side SQLContext for this
        # SparkContext, then build the Python wrappers around it.
        jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
        sparkSession = SparkSession(sc, jsqlContext.sparkSession())
        # NOTE(review): the constructed instance is discarded here, so the
        # constructor presumably stores itself in cls._instantiatedContext —
        # confirm against __init__.
        cls(sc, sparkSession, jsqlContext)
    return cls._instantiatedContext
[ "def", "getOrCreate", "(", "cls", ",", "sc", ")", ":", "if", "cls", ".", "_instantiatedContext", "is", "None", ":", "jsqlContext", "=", "sc", ".", "_jvm", ".", "SQLContext", ".", "getOrCreate", "(", "sc", ".", "_jsc", ".", "sc", "(", ")", ")", "spark...
38.909091
16
def apply(self, q, bindings, drilldowns):
    """ Apply a set of grouping criteria and project them. """
    refs = []
    for drilldown in self.parse(drilldowns):
        for attribute in self.cube.model.match(drilldown):
            refs.append(attribute.ref)
            table, column = attribute.bind(self.cube)
            bindings.append(Binding(table, attribute.ref))
            # Project the column, then group by it.
            q = q.column(column).group_by(column)
    return refs, q, bindings
[ "def", "apply", "(", "self", ",", "q", ",", "bindings", ",", "drilldowns", ")", ":", "info", "=", "[", "]", "for", "drilldown", "in", "self", ".", "parse", "(", "drilldowns", ")", ":", "for", "attribute", "in", "self", ".", "cube", ".", "model", "....
45.545455
9.909091
def save(self, fname=None):
    """
    Download the chart from the URL into a filename as a PNG.

    The filename defaults to the chart title (chtt), if any.

    :param fname: target file name; a '.png' suffix is appended if missing.
    :return: the name of the file written.
    :raises ValueError: if no filename is given and the chart has no name.
    :raises IOError: if the chart could not be downloaded and saved.
    """
    if not fname:
        fname = self.getname()
    if fname is None:
        # A plain ``assert`` would be stripped under ``python -O``;
        # raise explicitly so the validation always runs.
        raise ValueError('You must specify a filename to save to')
    if not fname.endswith('.png'):
        fname += '.png'
    try:
        urlretrieve(self.url, fname)
    except Exception as ex:
        # Chain the original error so the root cause is not lost.
        raise IOError('Problem saving %s to file' % fname) from ex
    return fname
[ "def", "save", "(", "self", ",", "fname", "=", "None", ")", ":", "if", "not", "fname", ":", "fname", "=", "self", ".", "getname", "(", ")", "assert", "fname", "!=", "None", ",", "'You must specify a filename to save to'", "if", "not", "fname", ".", "ends...
32.625
15.75
def _GetUserTypeAndPassword(username, password=None, is_admin=False):
  """Returns the user-type and password for a user.

  Args:
    username: Username for the user.
    password: Password for the user. If None, or not provided, we will prompt
      for one via the terminal.
    is_admin: Indicates whether the user should have admin privileges.
  """
  user_type = (
      api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN
      if is_admin else api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD)

  if password is None:
    # TODO
    # pytype: disable=wrong-arg-types
    password = getpass.getpass(
        prompt="Please enter password for user '%s':" % username)
    # pytype: enable=wrong-arg-types

  return user_type, password
[ "def", "_GetUserTypeAndPassword", "(", "username", ",", "password", "=", "None", ",", "is_admin", "=", "False", ")", ":", "if", "is_admin", ":", "user_type", "=", "api_user", ".", "ApiGrrUser", ".", "UserType", ".", "USER_TYPE_ADMIN", "else", ":", "user_type",...
37.05
19.45
def hi_stops(self, hi_stops):
    '''Set the hi stop values for this object's degrees of freedom.

    Parameters
    ----------
    hi_stops : float or sequence of float
        A hi stop value to set on all degrees of freedom, or a list
        containing one such value for each degree of freedom. For
        rotational degrees of freedom, these values must be in radians.
    '''
    # One stop per degree of freedom (angular + linear).
    dof_count = self.ADOF + self.LDOF
    _set_params(self.ode_obj, 'HiStop', hi_stops, dof_count)
[ "def", "hi_stops", "(", "self", ",", "hi_stops", ")", ":", "_set_params", "(", "self", ".", "ode_obj", ",", "'HiStop'", ",", "hi_stops", ",", "self", ".", "ADOF", "+", "self", ".", "LDOF", ")" ]
43.909091
26.454545
def to_xml(self):
    '''
    Returns a DOM element containing the XML representation of the invoice.

    @return: Element
    @raise InvoiceError: if the invoice has no line groups, a required
        attribute is empty, the declared payments exceed the invoice total,
        or a paid invoice is not fully covered by its payments.
    '''
    if not len(self.groups):
        raise InvoiceError("An invoice must have at least one group "
                           "of lines.")
    # Validate that every required attribute carries a value.
    # (Fixed defect: "identifier" was listed twice in this dict literal;
    # the duplicate key was redundant and has been removed.)
    for n, v in {"identifier": self.identifier,
                 "name": self.name,
                 "currency": self.currency,
                 "seller": self.seller,
                 "buyer": self.buyer,
                 "status": self.status,
                 "date": self.date,
                 "due_date": self.due_date,
                 "mentions": self.mentions,
                 'domain': self.domain}.items():
        if is_empty_or_none(v):
            raise InvoiceError("'%s' attribute cannot be empty or "
                               "None." % n)

    # Cross-check declared payments against the invoice total.
    total_invoice = self.total
    total_payments = sum([payment.amount for payment in self.payments])
    if total_payments > total_invoice:
        raise InvoiceError('The sum of the payments declared '
                           '(%f %s) can\'t be superior to the '
                           'total of the invoice (%f %s).'
                           % (total_payments, self.currency,
                              total_invoice, self.currency))
    if self.status == INVOICE_PAID and total_payments < total_invoice:
        raise InvoiceError('The invoice can only be marked as paid '
                           'if the sum of its payments (%f %s) is '
                           'equal to its total (%f %s).'
                           % (total_payments, self.currency,
                              total_invoice, self.currency))

    doc = Document()
    root = doc.createElement("invoice")
    root.setAttribute('xmlns', DEFAULT_NAMESPACE)
    root.setAttribute("domain", self.domain)
    root.setAttribute("version", XMLi_VERSION)
    root.setAttribute("agent", AGENT)

    # Adding custom elements
    super(Invoice, self).to_xml(root)

    self._create_text_node(root, "id", self.identifier)
    self._create_text_node(root, "name", self.name, True)
    self._create_text_node(root, "description", self.description, True)
    self._create_text_node(root, "date", self.date)
    self._create_text_node(root, "dueDate", self.due_date)
    self._create_text_node(root, "currency", self.currency)
    self._create_text_node(root, "status", self.status)
    root.appendChild(self.seller.to_xml("seller"))
    root.appendChild(self.buyer.to_xml("buyer"))
    if self.__shipping:
        root.appendChild(self.shipping.to_xml())
    self._create_text_node(root, "terms", self.terms, True)
    self._create_text_node(root, "mentions", self.mentions, True)

    if len(self.__payments):
        payments = doc.createElement("payments")
        for payment in self.__payments:
            payments.appendChild(payment.to_xml())
        root.appendChild(payments)

    if len(self.deliveries):
        deliveries = doc.createElement('deliveries')
        for delivery in self.__deliveries:
            deliveries.appendChild(delivery.to_xml())
        root.appendChild(deliveries)

    body = doc.createElement("body")
    root.appendChild(body)
    groups = doc.createElement("groups")
    body.appendChild(groups)

    for group in self.__groups:
        if not issubclass(group.__class__, Group):
            raise InvoiceError('group of type %s is not an instance '
                               'or a subclass of %s'
                               % (group.__class__.__name__, Group.__name__))
        groups.appendChild(group.to_xml())

    return root
[ "def", "to_xml", "(", "self", ")", ":", "if", "not", "len", "(", "self", ".", "groups", ")", ":", "raise", "InvoiceError", "(", "\"An invoice must have at least one group \"", "\"of lines.\"", ")", "for", "n", ",", "v", "in", "{", "\"identifier\"", ":", "sel...
45.916667
20.607143
def expand_groups(grp):
    """Expand group names.

    Args:
        grp (string): group names to expand

    Returns:
        list of groups

    Examples:
        * grp[1-3] will be expanded to [grp1, grp2, grp3]
        * grp1 will be expanded to [grp1]
    """
    match = re.match(r"(?P<name>.+)\[(?P<start>\d+)-(?P<end>\d+)\]", grp)
    if match is None:
        # No range syntax: the name expands to itself.
        return [grp]
    prefix = match.group('name')
    first = int(match.group('start'))
    last = int(match.group('end'))
    return [prefix + str(i) for i in range(first, last + 1)]
[ "def", "expand_groups", "(", "grp", ")", ":", "p", "=", "re", ".", "compile", "(", "r\"(?P<name>.+)\\[(?P<start>\\d+)-(?P<end>\\d+)\\]\"", ")", "m", "=", "p", ".", "match", "(", "grp", ")", "if", "m", "is", "not", "None", ":", "s", "=", "int", "(", "m"...
23.608696
20.434783
def shell_context_processor(self, fn):
    """
    Registers a shell context processor function.

    :param fn: the context processor callable to register.
    :return: ``fn`` unchanged, so this can be used as a decorator.
    """
    def register(app):
        return app.shell_context_processor(fn)

    # Registration is deferred until the app is available.
    self._defer(register)
    return fn
[ "def", "shell_context_processor", "(", "self", ",", "fn", ")", ":", "self", ".", "_defer", "(", "lambda", "app", ":", "app", ".", "shell_context_processor", "(", "fn", ")", ")", "return", "fn" ]
32.333333
10.333333
def set_bit(self, position: int):
    """
    Sets the value at position

    :param position: integer between 0 and 7, inclusive
    :return: None
    :raises ValueError: if ``position`` exceeds the widget's bit width.
    """
    top_bit = self._bit_width - 1
    if position > top_bit:
        raise ValueError('position greater than the bit width')

    self._value |= 1 << position
    self._text_update()
[ "def", "set_bit", "(", "self", ",", "position", ":", "int", ")", ":", "if", "position", ">", "(", "self", ".", "_bit_width", "-", "1", ")", ":", "raise", "ValueError", "(", "'position greater than the bit width'", ")", "self", ".", "_value", "|=", "(", "...
28.75
14.75
def functions(self):
    """
    Returns all documented module level functions in the module sorted
    alphabetically as a list of `pydoc.Function`.
    """
    def keep(obj):
        # Only Function docs that pass the configured filter.
        return isinstance(obj, Function) and self._docfilter(obj)

    return sorted(doc for doc in self.doc.values() if keep(doc))
[ "def", "functions", "(", "self", ")", ":", "p", "=", "lambda", "o", ":", "isinstance", "(", "o", ",", "Function", ")", "and", "self", ".", "_docfilter", "(", "o", ")", "return", "sorted", "(", "filter", "(", "p", ",", "self", ".", "doc", ".", "va...
41.142857
15.142857
def _helper(result,
            graph,
            number_edges_remaining: int,
            node_blacklist: Set[BaseEntity],
            invert_degrees: Optional[bool] = None,
            ):
    """Help build a random graph.

    Repeatedly picks a random source node already present in ``result`` and
    adds one random edge from ``graph`` that ``result`` does not yet have,
    until ``number_edges_remaining`` edges were added or no candidate is left.

    :type result: networkx.Graph
    :type graph: networkx.Graph
    """
    original_node_count = graph.number_of_nodes()
    log.debug('adding remaining %d edges', number_edges_remaining)
    for _ in range(number_edges_remaining):
        source, possible_step_nodes, c = None, set(), 0
        while not source or not possible_step_nodes:
            source = get_random_node(result, node_blacklist, invert_degrees=invert_degrees)
            c += 1
            if c >= original_node_count:
                # Give up instead of spinning forever when no growable
                # node remains.
                log.warning('infinite loop happening')
                log.warning('source: %s', source)
                log.warning('no grow: %s', node_blacklist)
                return  # Happens when after exhausting the connected components. Try increasing the number seed edges

            if source is None:
                continue  # maybe do something else?

            # Only keep targets in the original graph that aren't in the result graph
            possible_step_nodes = set(graph[source]) - set(result[source])

            if not possible_step_nodes:
                node_blacklist.add(source)  # there aren't any possible nodes to step to, so try growing from somewhere else

        step_node = random.choice(list(possible_step_nodes))

        # it's not really a big deal which, but it might be possible to weight this by the utility of edges later
        # NOTE(review): .items() on graph[source][step_node] implies a
        # MultiGraph edge-key dict — confirm the graph type.
        key, attr_dict = random.choice(list(graph[source][step_node].items()))

        result.add_edge(source, step_node, key=key, **attr_dict)
[ "def", "_helper", "(", "result", ",", "graph", ",", "number_edges_remaining", ":", "int", ",", "node_blacklist", ":", "Set", "[", "BaseEntity", "]", ",", "invert_degrees", ":", "Optional", "[", "bool", "]", "=", "None", ",", ")", ":", "original_node_count", ...
40
25.302326
def can_allow_multiple_input_shapes(spec):
    """
    Examines a model specification and determines if it can compute results
    for more than one output shape.

    :param spec: MLModel
        The protobuf specification of the model.

    :return: Bool
        Returns True if the model can allow multiple input shapes, False
        otherwise.

    :raises Exception: if the spec does not contain a neural network or its
        shapes cannot be computed.
    """
    # First, check that the model actually has a neural network in it.
    try:
        _get_nn_layers(spec)
    except Exception as err:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # catch Exception and chain the cause instead.
        raise Exception(
            'Unable to verify that this model contains a neural network.'
        ) from err

    try:
        shaper = NeuralNetworkShaper(spec, False)
    except Exception as err:
        raise Exception(
            'Unable to compute shapes for this neural network.') from err

    # Flexible shape on any input means multiple input shapes are possible.
    for name in _get_input_names(spec):
        shape_dict = shaper.shape(name)
        shape = NeuralNetworkMultiArrayShapeRange(shape_dict)
        if shape.isFlexible():
            return True

    return False
[ "def", "can_allow_multiple_input_shapes", "(", "spec", ")", ":", "# First, check that the model actually has a neural network in it", "try", ":", "layers", "=", "_get_nn_layers", "(", "spec", ")", "except", ":", "raise", "Exception", "(", "'Unable to verify that this model co...
27.848485
25.969697
def _call(self, utterances_batch: list, utterances_ids: Optional[list] = None) -> list:
    """
    Processes batch of utterances and returns corresponding responses batch.

    Each call of Agent passes incoming utterances batch through skills filter,
    agent skills, skills processor. Batch of dialog IDs can be provided, in
    other case utterances indexes in incoming batch are used as dialog IDs.

    Args:
        utterances_batch: Batch of incoming utterances.
        utterances_ids: Batch of dialog IDs corresponding to incoming
            utterances.

    Returns:
        responses: A batch of responses corresponding to the utterance batch
            received by agent.
    """
    batch_size = len(utterances_batch)
    ids = utterances_ids or list(range(batch_size))
    batch_history = [self.history[utt_id] for utt_id in ids]
    responses = []

    # One mask per skill telling which utterances it should handle.
    filtered = self.skills_filter(utterances_batch, batch_history)

    for skill_i, (filtered_utterances, skill) in enumerate(zip(filtered, self.wrapped_skills)):
        # Positions of the utterances routed to this skill.
        skill_i_utt_indexes = [utt_index for utt_index, utt_filter in enumerate(filtered_utterances) if utt_filter]

        if skill_i_utt_indexes:
            skill_i_utt_batch = [utterances_batch[i] for i in skill_i_utt_indexes]
            skill_i_utt_ids = [ids[i] for i in skill_i_utt_indexes]
            # Default (response, confidence) for utterances this skill skips.
            res = [(None, 0.)] * batch_size
            predicted, confidence = skill(skill_i_utt_batch, skill_i_utt_ids)

            # NOTE(review): the loop targets shadow ``predicted`` and
            # ``confidence``; this works only because zip() captures the
            # original sequences before rebinding — consider renaming.
            for i, predicted, confidence in zip(skill_i_utt_indexes, predicted, confidence):
                res[i] = (predicted, confidence)

            responses.append(res)

    responses = self.skills_processor(utterances_batch, batch_history, *responses)
    return responses
[ "def", "_call", "(", "self", ",", "utterances_batch", ":", "list", ",", "utterances_ids", ":", "Optional", "[", "list", "]", "=", "None", ")", "->", "list", ":", "batch_size", "=", "len", "(", "utterances_batch", ")", "ids", "=", "utterances_ids", "or", ...
45
31.1
def make_posthook(self):
    """
    Run the post hook into the project directory.

    If a ``posthook`` is configured, change into the project's main
    directory and invoke it; otherwise do nothing.

    (Removed leftover debugging: two ``print`` calls inspecting the hook
    and an ``import ipdb; ipdb.set_trace()`` breakpoint.)
    """
    if self.posthook:
        os.chdir(self.project_name)  # enter the project main directory
        self.posthook()
[ "def", "make_posthook", "(", "self", ")", ":", "print", "(", "id", "(", "self", ".", "posthook", ")", ",", "self", ".", "posthook", ")", "print", "(", "id", "(", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "posthook", ")", ",", ...
48.375
17.75
def cloned_workspace(clone_config, chdir=True):
    """Create a cloned workspace and yield it.

    This creates a workspace for a with-block and cleans it up on exit. By
    default, this will also change to the workspace's `clone_dir` for the
    duration of the with-block.

    NOTE(review): this is a generator written for ``with``-usage, so it is
    presumably decorated with ``contextlib.contextmanager`` at the
    definition site — confirm in the original file.

    Args:
        clone_config: The execution engine configuration to use for the
            workspace.
        chdir: Whether to change to the workspace's `clone_dir` before
            entering the with-block.

    Yields:
        The `ClonedWorkspace` instance created for the context.
    """
    workspace = ClonedWorkspace(clone_config)
    # Remember the caller's cwd so it can be restored on exit.
    original_dir = os.getcwd()
    if chdir:
        os.chdir(workspace.clone_dir)

    try:
        yield workspace
    finally:
        # Restore the working directory and dispose of the clone even if
        # the with-block raised.
        os.chdir(original_dir)
        workspace.cleanup()
[ "def", "cloned_workspace", "(", "clone_config", ",", "chdir", "=", "True", ")", ":", "workspace", "=", "ClonedWorkspace", "(", "clone_config", ")", "original_dir", "=", "os", ".", "getcwd", "(", ")", "if", "chdir", ":", "os", ".", "chdir", "(", "workspace"...
32.826087
23.565217
def _CheckFileEntryType(self, file_entry): """Checks the file entry type find specifications. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not or None if no file entry type specification is defined. """ if not self._file_entry_types: return None return ( self._CheckIsDevice(file_entry) or self._CheckIsDirectory(file_entry) or self._CheckIsFile(file_entry) or self._CheckIsLink(file_entry) or self._CheckIsPipe(file_entry) or self._CheckIsSocket(file_entry))
[ "def", "_CheckFileEntryType", "(", "self", ",", "file_entry", ")", ":", "if", "not", "self", ".", "_file_entry_types", ":", "return", "None", "return", "(", "self", ".", "_CheckIsDevice", "(", "file_entry", ")", "or", "self", ".", "_CheckIsDirectory", "(", "...
35.176471
24.058824
def authenticated(function):
    """Re-authenticate if session expired.

    Decorator: on ``UPSError`` the session is re-established via ``_login``
    and the call is retried once.

    (Fixes: the wrapper now forwards keyword arguments, and ``wraps``
    preserves the decorated function's metadata.)
    """
    from functools import wraps

    @wraps(function)
    def wrapped(*args, **kwargs):
        """Wrap function."""
        try:
            return function(*args, **kwargs)
        except UPSError:
            # Session expired: log in again and retry once.
            _login(*args)
            return function(*args, **kwargs)

    return wrapped
[ "def", "authenticated", "(", "function", ")", ":", "def", "wrapped", "(", "*", "args", ")", ":", "\"\"\"Wrap function.\"\"\"", "try", ":", "return", "function", "(", "*", "args", ")", "except", "UPSError", ":", "_login", "(", "*", "args", ")", "return", ...
27.1
12.2
def delete(self):
    """
    Delete this Vlan interface from the parent interface. This will
    also remove stale routes if the interface has networks associated
    with it.

    :return: None
    """
    if self in self._parent.vlan_interface:
        # Rebuild the parent's VLAN list without this interface and
        # persist the change.
        self._parent.data['vlanInterfaces'] = [
            v for v in self._parent.vlan_interface
            if v != self]
        self.update()
        # Purge any routes orphaned by the removal.
        for route in self._parent._engine.routing:
            if route.to_delete:
                route.delete()
[ "def", "delete", "(", "self", ")", ":", "if", "self", "in", "self", ".", "_parent", ".", "vlan_interface", ":", "self", ".", "_parent", ".", "data", "[", "'vlanInterfaces'", "]", "=", "[", "v", "for", "v", "in", "self", ".", "_parent", ".", "vlan_int...
34.8125
12.8125
def node_to_nodal_planes(node):
    """
    Parses the nodal plane distribution to a PMF
    """
    if not len(node):
        return None
    pairs = []
    for plane in node.nodes:
        attribs = plane.attrib
        if not all(attribs[key] for key in attribs):
            # One plane fails - return None
            return None
        nodal_plane = NodalPlane(float(attribs["strike"]),
                                 float(attribs["dip"]),
                                 float(attribs["rake"]))
        pairs.append((float(attribs["probability"]), nodal_plane))
    return PMF(pairs)
[ "def", "node_to_nodal_planes", "(", "node", ")", ":", "if", "not", "len", "(", "node", ")", ":", "return", "None", "npd_pmf", "=", "[", "]", "for", "plane", "in", "node", ".", "nodes", ":", "if", "not", "all", "(", "plane", ".", "attrib", "[", "key...
34.5625
13.5625
def ConvBPDNMask(*args, **kwargs): """A wrapper function that dynamically defines a class derived from one of the implementations of the Convolutional Constrained MOD problems, and returns an object instantiated with the provided parameters. The wrapper is designed to allow the appropriate object to be created by calling this function using the same syntax as would be used if it were a class. The specific implementation is selected by use of an additional keyword argument 'method'. Valid values are: - ``'admm'`` : Use the implementation defined in :class:`.admm.cbpdn.ConvBPDNMaskDcpl`. - ``'fista'`` : Use the implementation defined in :class:`.fista.cbpdn.ConvBPDNMask`. The default value is ``'admm'``. """ # Extract method selection argument or set default method = kwargs.pop('method', 'admm') # Assign base class depending on method selection argument base = cbpdnmsk_class_label_lookup(method) # Nested class with dynamically determined inheritance class ConvBPDNMask(base): def __init__(self, *args, **kwargs): super(ConvBPDNMask, self).__init__(*args, **kwargs) # Allow pickling of objects of type ConvBPDNMask _fix_dynamic_class_lookup(ConvBPDNMask, method) # Return object of the nested class type return ConvBPDNMask(*args, **kwargs)
[ "def", "ConvBPDNMask", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Extract method selection argument or set default", "method", "=", "kwargs", ".", "pop", "(", "'method'", ",", "'admm'", ")", "# Assign base class depending on method selection argument", "bas...
39.558824
19.852941
def authorize_url(self, redirect_uri, state=None):
    """
    Build the web-page OAuth authorization URL.

    For details see
    https://work.weixin.qq.com/api/doc#90000/90135/91022

    :param redirect_uri: callback URL to redirect to after authorization
    :param state: value echoed back as the ``state`` query parameter after
        the redirect
    :return: the authorization URL string
    """
    # The callback must be fully percent-encoded (safe=b'' escapes '/').
    redirect_uri = six.moves.urllib.parse.quote(redirect_uri, safe=b'')
    url_list = [
        self.OAUTH_BASE_URL,
        '?appid=',
        self._client.corp_id,
        '&redirect_uri=',
        redirect_uri,
        '&response_type=code&scope=snsapi_base',
    ]
    if state:
        url_list.extend(['&state=', state])
    url_list.append('#wechat_redirect')
    return ''.join(url_list)
[ "def", "authorize_url", "(", "self", ",", "redirect_uri", ",", "state", "=", "None", ")", ":", "redirect_uri", "=", "six", ".", "moves", ".", "urllib", ".", "parse", ".", "quote", "(", "redirect_uri", ",", "safe", "=", "b''", ")", "url_list", "=", "[",...
30.695652
14.521739
def _convert_url_to_downloadable(url): """Convert a url to the proper style depending on its website.""" if 'drive.google.com' in url: # For future support of google drive file_id = url.split('d/')[1].split('/')[0] base_url = 'https://drive.google.com/uc?export=download&id=' out = '{}{}'.format(base_url, file_id) elif 'dropbox.com' in url: if url.endswith('.png'): out = url + '?dl=1' else: out = url.replace('dl=0', 'dl=1') elif 'github.com' in url: out = url.replace('github.com', 'raw.githubusercontent.com') out = out.replace('blob/', '') else: out = url return out
[ "def", "_convert_url_to_downloadable", "(", "url", ")", ":", "if", "'drive.google.com'", "in", "url", ":", "# For future support of google drive", "file_id", "=", "url", ".", "split", "(", "'d/'", ")", "[", "1", "]", ".", "split", "(", "'/'", ")", "[", "0", ...
35.473684
14.578947
def stop(self):
    """
    stops the process triggered by start

    Setting the shared memory boolean run to false, which should prevent
    the loop from repeating. Call __cleanup to make sure the process
    stopped. After that we could trigger start() again.

    NOTE(review): the nesting below was reconstructed from a collapsed
    source line — confirm that the ``raise_error`` check and the final
    reset belong inside the ``self._proc is not None`` guard.
    """
    if self.is_alive():
        self._proc.terminate()
    if self._proc is not None:
        self.__cleanup()
        if self.raise_error:
            # NOTE(review): 'exticode' in the message is a typo for
            # 'exitcode' — runtime string left untouched here.
            if self._proc.exitcode == 255:
                raise LoopExceptionError("the loop function return non zero exticode ({})!\n".format(self._proc.exitcode)+
                                         "see log (INFO level) for traceback information")
        self.pipe_handler.close()
        # Drop the handle so start() can be triggered again.
        self._proc = None
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "is_alive", "(", ")", ":", "self", ".", "_proc", ".", "terminate", "(", ")", "if", "self", ".", "_proc", "is", "not", "None", ":", "self", ".", "__cleanup", "(", ")", "if", "self", ".", "...
40.7
20.5
def dfbool2intervals(df, colbool):
    """
    ds contains bool values
    """
    df.index = range(len(df))
    intervals = bools2intervals(df[colbool])
    for interval_id, interval in enumerate(intervals):
        start, stop = interval[0], interval[1]
        span = slice(start, stop)  # .loc slicing is end-inclusive
        length = stop - start + 1
        df.loc[span, f'{colbool} interval id'] = interval_id
        df.loc[span, f'{colbool} interval start'] = start
        df.loc[span, f'{colbool} interval stop'] = stop
        df.loc[span, f'{colbool} interval length'] = length
        df.loc[span, f'{colbool} interval within index'] = range(length)
    df[f'{colbool} interval index'] = df.index
    return df
[ "def", "dfbool2intervals", "(", "df", ",", "colbool", ")", ":", "df", ".", "index", "=", "range", "(", "len", "(", "df", ")", ")", "intervals", "=", "bools2intervals", "(", "df", "[", "colbool", "]", ")", "for", "intervali", ",", "interval", "in", "e...
49.357143
22.642857
def _fetchChildren(self):
    '''Fetch and return new child items.'''
    # List paths under this directory.
    paths = [
        os.path.normpath(os.path.join(self.path, name))
        for name in os.listdir(self.path)
    ]

    # Handle collections.
    collections, remainder = clique.assemble(
        paths, [clique.PATTERNS['frames']]
    )

    children = []
    for path in remainder:
        try:
            item = ItemFactory(path)
        except ValueError:
            # Path is not representable as an item; skip it.
            pass
        else:
            children.append(item)

    for collection in collections:
        children.append(Collection(collection))

    return children
[ "def", "_fetchChildren", "(", "self", ")", ":", "children", "=", "[", "]", "# List paths under this directory.", "paths", "=", "[", "]", "for", "name", "in", "os", ".", "listdir", "(", "self", ".", "path", ")", ":", "paths", ".", "append", "(", "os", "...
27.115385
18.346154
def generate_markdown_doc(app_name, spec):
    """Generate Markdown Documentation for the given spec/app name.

    Args:
        app_name (str): The name of the application.
        spec (YapconfSpec): A yapconf specification with sources loaded.

    Returns (str): A valid, markdown string representation of the
        documentation for the given specification.
    """
    # Apply standard headers.
    sections = [
        HEADER.format(app_name=app_name),
        SOURCES_HEADER.format(app_name=app_name)
    ]

    # Generate the sources section of the documentation in label order
    # for deterministic output. (sorted() accepts any iterable; the
    # intermediate list() was redundant.)
    for label in sorted(spec.sources):
        sections.append(
            _generate_source_section(label, spec.sources[label], app_name)
        )

    # Generate the config section.
    sections.append(CONFIG_HEADER.format(app_name=app_name))
    table_rows, item_sections = _generate_item_sections(
        _sorted_dict_values(spec.items), app_name
    )
    headers = {
        'name': 'Name',
        'type': 'Type',
        'default': 'Default',
        'description': 'Description'
    }
    sections.append(
        build_markdown_table(
            headers,
            table_rows,
            ['name', 'type', 'default', 'description'],
        )
    )
    sections.extend(item_sections)

    return '\n'.join(sections)
[ "def", "generate_markdown_doc", "(", "app_name", ",", "spec", ")", ":", "# Apply standard headers.", "sections", "=", "[", "HEADER", ".", "format", "(", "app_name", "=", "app_name", ")", ",", "SOURCES_HEADER", ".", "format", "(", "app_name", "=", "app_name", "...
27.64
20.56
def number(self, text):
    """number = digit - "0" . {digit} ;"""
    self._attempting(text)
    # A number is a non-zero digit followed by any run of digits,
    # with no whitespace allowed between them.
    leading = exclusion(self.digit, "0")
    trailing = zero_or_more(self.digit, ignore_whitespace=False)
    parser = concatenation([leading, trailing], ignore_whitespace=False)
    return parser(text).compressed(TokenType.number)
[ "def", "number", "(", "self", ",", "text", ")", ":", "self", ".", "_attempting", "(", "text", ")", "return", "concatenation", "(", "[", "exclusion", "(", "self", ".", "digit", ",", "\"0\"", ")", ",", "zero_or_more", "(", "self", ".", "digit", ",", "i...
24.153846
20
def _get_imagesave_wildcards(self): 'return the wildcard string for the filesave dialog' default_filetype = self.get_default_filetype() filetypes = self.get_supported_filetypes_grouped() sorted_filetypes = filetypes.items() sorted_filetypes.sort() wildcards = [] extensions = [] filter_index = 0 for i, (name, exts) in enumerate(sorted_filetypes): ext_list = ';'.join(['*.%s' % ext for ext in exts]) extensions.append(exts[0]) wildcard = '%s (%s)|%s' % (name, ext_list, ext_list) if default_filetype in exts: filter_index = i wildcards.append(wildcard) wildcards = '|'.join(wildcards) return wildcards, extensions, filter_index
[ "def", "_get_imagesave_wildcards", "(", "self", ")", ":", "default_filetype", "=", "self", ".", "get_default_filetype", "(", ")", "filetypes", "=", "self", ".", "get_supported_filetypes_grouped", "(", ")", "sorted_filetypes", "=", "filetypes", ".", "items", "(", "...
43
11.666667
def get_meta(self, ignore=('uniqueid',)):
    """Dictionary of all meta-tags, with option to ignore certain tags.

    See all the meta-tag properties that are shared by ALL Parameters.
    If a given value is 'None', that means that it is not shared
    among ALL Parameters.  To see the different values among the
    Parameters, you can access that attribute.

    :parameter ignore: iterable of keys to exclude from the returned
        dictionary (default: ``('uniqueid',)``)
    :return: an ordered dictionary of tag properties
    """
    # NOTE: the default was changed from a mutable list literal to a
    # tuple; membership testing is unchanged, but the shared-mutable-
    # default pitfall is avoided.
    return OrderedDict([(k, getattr(self, k))
                        for k in _meta_fields_twig
                        if k not in ignore])
[ "def", "get_meta", "(", "self", ",", "ignore", "=", "[", "'uniqueid'", "]", ")", ":", "return", "OrderedDict", "(", "[", "(", "k", ",", "getattr", "(", "self", ",", "k", ")", ")", "for", "k", "in", "_meta_fields_twig", "if", "k", "not", "in", "igno...
45.866667
18.533333
def fetch_files(self):
    """Fetch remote files (called after start).

    Copies every (remote, local) pair queued in ``self.to_fetch``; a
    no-op when nothing has been queued.
    """
    if self.to_fetch:
        for remote_path, local_path in self.to_fetch:
            self._fetch_file(remote_path, local_path)
[ "def", "fetch_files", "(", "self", ")", ":", "if", "not", "self", ".", "to_fetch", ":", "return", "for", "remote_file", ",", "local_file", "in", "self", ".", "to_fetch", ":", "self", ".", "_fetch_file", "(", "remote_file", ",", "local_file", ")" ]
38
12.833333
def is_birthday(self, dt=None):
    """
    Check if its the birthday.
    Compares the date/month values of the two dates.

    :param dt: date/datetime to compare against; defaults to "now" in
        this instance's timezone when omitted.

    :rtype: bool
    """
    if dt is None:
        dt = self.now(self.tz)

    # Normalize to a pendulum instance so .month/.day work for plain
    # datetime/date inputs as well.
    instance = pendulum.instance(dt)

    # Year is intentionally ignored: only month and day must match.
    return (self.month, self.day) == (instance.month, instance.day)
[ "def", "is_birthday", "(", "self", ",", "dt", "=", "None", ")", ":", "if", "dt", "is", "None", ":", "dt", "=", "self", ".", "now", "(", "self", ".", "tz", ")", "instance", "=", "pendulum", ".", "instance", "(", "dt", ")", "return", "(", "self", ...
25.384615
17.384615
def poly2bez(poly, return_bpoints=False):
    """Converts a cubic or lower order Polynomial object (or a sequence
    of coefficients) to a CubicBezier, QuadraticBezier, or Line object
    as appropriate.  If return_bpoints=True then this will instead only
    return the control points of the corresponding Bezier curve.
    Note: The inverse operation is available as a method of CubicBezier,
    QuadraticBezier and Line objects."""
    control_points = polynomial2bezier(poly)
    return control_points if return_bpoints else bpoints2bezier(control_points)
[ "def", "poly2bez", "(", "poly", ",", "return_bpoints", "=", "False", ")", ":", "bpoints", "=", "polynomial2bezier", "(", "poly", ")", "if", "return_bpoints", ":", "return", "bpoints", "else", ":", "return", "bpoints2bezier", "(", "bpoints", ")" ]
46.416667
15.583333
def exists(self, key, **opts):
    """Return if a key exists in the cache."""
    key, store = self._expand_opts(key, opts)
    data = store.get(key)
    # NOTE: we deliberately do NOT delete the entry here even when it
    # reads as expired — a max_age supplied just for this call may have
    # caused the miss.
    return bool(data) and not self._has_expired(data, opts)
[ "def", "exists", "(", "self", ",", "key", ",", "*", "*", "opts", ")", ":", "key", ",", "store", "=", "self", ".", "_expand_opts", "(", "key", ",", "opts", ")", "data", "=", "store", ".", "get", "(", "key", ")", "# Note that we do not actually delete th...
42.777778
14.555556
def download(self, bucket, key, fileobj, extra_args=None,
             subscribers=None):
    """Downloads a file from S3

    :type bucket: str
    :param bucket: The name of the bucket to download from

    :type key: str
    :param key: The name of the key to download from

    :type fileobj: str or seekable file-like object
    :param fileobj: The name of a file to download or a seekable file-like
        object to download. It is recommended to use a filename because
        file-like objects may result in higher memory usage.

    :type extra_args: dict
    :param extra_args: Extra arguments that may be passed to the
        client operation

    :type subscribers: list(s3transfer.subscribers.BaseSubscriber)
    :param subscribers: The list of subscribers to be invoked in the
        order provided based on the event emit during the
        process of the transfer request.

    :rtype: s3transfer.futures.TransferFuture
    :returns: Transfer future representing the download
    """
    # Normalize optional args; fresh containers per call (no shared
    # mutable defaults).
    if extra_args is None:
        extra_args = {}
    if subscribers is None:
        subscribers = []
    # Reject unsupported extra args up front, before queueing work.
    self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
    call_args = CallArgs(
        bucket=bucket, key=key, fileobj=fileobj, extra_args=extra_args,
        subscribers=subscribers
    )
    # Downloads additionally need the IO executor (and the bandwidth
    # limiter, when one was configured) threaded into the task.
    extra_main_kwargs = {'io_executor': self._io_executor}
    if self._bandwidth_limiter:
        extra_main_kwargs['bandwidth_limiter'] = self._bandwidth_limiter
    return self._submit_transfer(
        call_args, DownloadSubmissionTask, extra_main_kwargs)
[ "def", "download", "(", "self", ",", "bucket", ",", "key", ",", "fileobj", ",", "extra_args", "=", "None", ",", "subscribers", "=", "None", ")", ":", "if", "extra_args", "is", "None", ":", "extra_args", "=", "{", "}", "if", "subscribers", "is", "None",...
40.780488
21.414634
def submit_job(self, job_definition, parameters, job_name=None, queue=None):
    """Wrap the client's submit_job with useful defaults.

    A random job name is generated when none is given, and the active
    queue is used when no queue is specified.  Returns the new job id.
    """
    name = _random_id() if job_name is None else job_name
    response = self._client.submit_job(
        jobName=name,
        jobQueue=queue or self.get_active_queue(),
        jobDefinition=job_definition,
        parameters=parameters
    )
    return response['jobId']
[ "def", "submit_job", "(", "self", ",", "job_definition", ",", "parameters", ",", "job_name", "=", "None", ",", "queue", "=", "None", ")", ":", "if", "job_name", "is", "None", ":", "job_name", "=", "_random_id", "(", ")", "response", "=", "self", ".", "...
39.090909
11.636364
def Gamma(cls,
          shape: 'TensorFluent',
          scale: 'TensorFluent',
          batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    '''Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.

    Args:
        shape: The shape parameter of the Gamma distribution.
        scale: The scale parameter of the Gamma distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Gamma distribution and a TensorFluent sample drawn from the distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    '''
    if shape.scope != scale.scope:
        raise ValueError('Gamma distribution: parameters must have same scope!')
    concentration = shape.tensor
    # tf.distributions.Gamma is parameterized by rate; convert from scale.
    rate = 1 / scale.tensor
    dist = tf.distributions.Gamma(concentration, rate)
    # A batch dimension is only added when neither input already has one.
    batch = shape.batch or scale.batch
    if not batch and batch_size is not None:
        t = dist.sample(batch_size)
        batch = True
    else:
        t = dist.sample()
    scope = shape.scope.as_list()
    return (dist, TensorFluent(t, scope, batch=batch))
[ "def", "Gamma", "(", "cls", ",", "shape", ":", "'TensorFluent'", ",", "scale", ":", "'TensorFluent'", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "Tuple", "[", "Distribution", ",", "'TensorFluent'", "]", ":", "if", "shape...
39.933333
22.333333
def parse_args(self):
    """
    Called from ``gnotty.server.run`` and parses any CLI args
    provided. Also handles loading settings from the Python module
    specified with the ``--conf-file`` arg. CLI args take precedence
    over any settings defined in the Python module defined by
    ``--conf-file``.
    """
    options, _ = parser.parse_args()
    file_settings = {}
    if options.CONF_FILE:
        # NOTE(review): execfile exists only on Python 2; this call
        # would need exec(compile(...)) on Python 3 — confirm target
        # interpreter before changing.
        execfile(options.CONF_FILE, {}, file_settings)
    for option in self.option_list:
        if option.dest:
            file_value = file_settings.get("GNOTTY_%s" % option.dest, None)
            # optparse doesn't seem to provide a way to determine if
            # an option's value was provided as a CLI arg, or if the
            # default is being used, so we manually check sys.argv,
            # since provided CLI args should take precedence over
            # any settings defined in a conf module.
            flags = option._short_opts + option._long_opts
            in_argv = set(flags) & set(sys.argv)
            options_value = getattr(options, option.dest)
            # Precedence: CLI arg > conf-file setting > existing/default.
            if file_value and not in_argv:
                self[option.dest] = file_value
            elif in_argv:
                self[option.dest] = options_value
            else:
                self[option.dest] = self.get(option.dest, options_value)
    self.set_max_message_length()
    self["STATIC_URL"] = "/static/"
    # Resolve the log-level name (e.g. "INFO") to its logging constant.
    self["LOG_LEVEL"] = getattr(logging, self["LOG_LEVEL"])
[ "def", "parse_args", "(", "self", ")", ":", "options", ",", "_", "=", "parser", ".", "parse_args", "(", ")", "file_settings", "=", "{", "}", "if", "options", ".", "CONF_FILE", ":", "execfile", "(", "options", ".", "CONF_FILE", ",", "{", "}", ",", "fi...
48.9375
16.5
def around_me_in(self, leaderboard_name, member, **options):
    '''
    Retrieve a page of leaders from the named leaderboard around a given member.

    @param leaderboard_name [String] Name of the leaderboard.
    @param member [String] Member name.
    @param options [Hash] Options to be used when retrieving the page from the named leaderboard.
    @return a page of leaders from the named leaderboard around a given member. Returns an empty array for a non-existent member.
    '''
    # Rank lookup direction depends on leaderboard ordering: DESC
    # leaderboards rank highest score first (zrevrank), ASC use zrank.
    reverse_rank_for_member = None
    if self.order == self.DESC:
        reverse_rank_for_member = self.redis_connection.zrevrank(
            leaderboard_name,
            member)
    else:
        reverse_rank_for_member = self.redis_connection.zrank(
            leaderboard_name,
            member)

    # Unknown member: redis returns None for the rank.
    if reverse_rank_for_member is None:
        return []

    page_size = options.get('page_size', self.page_size)

    # Center the page on the member: half a page before, half after,
    # clamped at the top of the leaderboard.
    starting_offset = reverse_rank_for_member - (page_size // 2)
    if starting_offset < 0:
        starting_offset = 0

    ending_offset = (starting_offset + page_size) - 1

    raw_leader_data = self._range_method(
        self.redis_connection,
        leaderboard_name,
        int(starting_offset),
        int(ending_offset),
        withscores=False)

    return self._parse_raw_members(
        leaderboard_name, raw_leader_data, **options)
[ "def", "around_me_in", "(", "self", ",", "leaderboard_name", ",", "member", ",", "*", "*", "options", ")", ":", "reverse_rank_for_member", "=", "None", "if", "self", ".", "order", "==", "self", ".", "DESC", ":", "reverse_rank_for_member", "=", "self", ".", ...
38.131579
22.447368
def sent2features(sentence, template):
    """Extract per-token features for a whole sentence.

    :type sentence: list of token, each token is a list of tag
    :param template: feature template forwarded to ``word2features``
    :return: list with one feature set per token position
    """
    return [word2features(sentence, position, template)
            for position, _token in enumerate(sentence)]
[ "def", "sent2features", "(", "sentence", ",", "template", ")", ":", "return", "[", "word2features", "(", "sentence", ",", "i", ",", "template", ")", "for", "i", "in", "range", "(", "len", "(", "sentence", ")", ")", "]" ]
37.333333
17.166667
def iter_detector_clss():
    """Iterate over all of the detectors that are included in this
    sub-package. This is a convenience method for capturing all new
    Detectors that are added over time and it is used both by the unit
    tests and in the ``Scrubber.__init__`` method.
    """
    # Scan this package's directory for Detector subclasses, excluding
    # abstract ones via the _is_abstract_detector predicate.
    return iter_subclasses(
        os.path.dirname(os.path.abspath(__file__)),
        Detector,
        _is_abstract_detector,
    )
[ "def", "iter_detector_clss", "(", ")", ":", "return", "iter_subclasses", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "Detector", ",", "_is_abstract_detector", ",", ")" ]
37.727273
15.909091
def get_kernel_loop_nest(self):
    """Return kernel loop nest including any preceding pragmas and following swaps."""
    # Exact type match is intentional here (not isinstance): only the
    # listed node classes themselves belong to the loop nest.
    wanted_node_types = (c_ast.For, c_ast.Pragma, c_ast.FuncCall)
    loop_nest = [stmt for stmt in self.kernel_ast.block_items
                 if type(stmt) in wanted_node_types]
    assert len(loop_nest) >= 1, "Found to few for statements in kernel"
    return loop_nest
[ "def", "get_kernel_loop_nest", "(", "self", ")", ":", "loop_nest", "=", "[", "s", "for", "s", "in", "self", ".", "kernel_ast", ".", "block_items", "if", "type", "(", "s", ")", "in", "[", "c_ast", ".", "For", ",", "c_ast", ".", "Pragma", ",", "c_ast",...
59.333333
19.333333
def all_leaders_from(self, leaderboard_name, **options):
    '''
    Retrieves all leaders from the named leaderboard.

    @param leaderboard_name [String] Name of the leaderboard.
    @param options [Hash] Options to be used when retrieving the leaders from the named leaderboard.
    @return the named leaderboard.
    '''
    # Range 0..-1 covers every member of the sorted set.
    members = self._range_method(
        self.redis_connection,
        leaderboard_name,
        0,
        -1,
        withscores=False)
    return self._parse_raw_members(leaderboard_name, members, **options)
[ "def", "all_leaders_from", "(", "self", ",", "leaderboard_name", ",", "*", "*", "options", ")", ":", "raw_leader_data", "=", "self", ".", "_range_method", "(", "self", ".", "redis_connection", ",", "leaderboard_name", ",", "0", ",", "-", "1", ",", "withscore...
46.666667
23.5
def encode_multipart_formdata_stream(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, value) or (key, value, MIME type)
        field tuples.  The key is treated as the field name, and the value as
        the body of the form-data bytes. If the value is a tuple of two
        elements, then the first element is treated as the filename of the
        form-data section and a suitable MIME type is guessed based on the
        filename. If the value is a tuple of three elements, then the third
        element is treated as an explicit MIME type of the form-data section.

        Field names and filenames must be unicode.

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    from mimetypes import guess_type  # hoisted: was re-imported inside the loop

    body = []

    def body_write(item):
        # Wrap raw bytes/text in file-like objects so callers can stream
        # the body without copying.
        if isinstance(item, six.binary_type):
            item = BytesIO(item)
        elif isinstance(item, six.text_type):
            item = StringIO(item)
        body.append(item)

    def body_write_encode(item):
        # PEP 8 (E731): plain nested function instead of a lambda assignment.
        body.append(BytesIO(item.encode('utf-8')))

    if boundary is None:
        boundary = choose_boundary()

    for fieldname, value in iter_fields(fields):
        body_write_encode('--%s\r\n' % (boundary))

        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                filename, data = value
                content_type, _ = guess_type(filename)
                if content_type is None:
                    content_type = 'application/octet-stream'
            body_write_encode('Content-Disposition: form-data; name="%s"; '
                              'filename="%s"\r\n' % (fieldname, filename))
            # Content-Type header ends with the blank line separating
            # headers from the part body.
            body_write_encode('Content-Type: %s\r\n\r\n' % (content_type,))
        else:
            data = value
            body_write_encode('Content-Disposition: form-data; name="%s"\r\n'
                              % (fieldname))
            # Blank line separating headers from the part body.
            body_write(b'\r\n')

        if isinstance(data, six.integer_types):
            data = six.text_type(data)  # Backwards compatibility

        if isinstance(data, six.text_type):
            body_write_encode(data)
        else:
            body_write(data)

        body_write(b'\r\n')

    body_write_encode('--%s--\r\n' % (boundary))
    content_type = 'multipart/form-data; boundary=%s' % (boundary)

    return body, content_type
[ "def", "encode_multipart_formdata_stream", "(", "fields", ",", "boundary", "=", "None", ")", ":", "body", "=", "[", "]", "def", "body_write", "(", "item", ")", ":", "if", "isinstance", "(", "item", ",", "six", ".", "binary_type", ")", ":", "item", "=", ...
36.557143
21.842857
def quokka_polygons(size=None, extract=None): """ Returns example polygons on the standard example quokke image. The result contains one polygon, covering the quokka's outline. Parameters ---------- size : None or float or tuple of int or tuple of float, optional Size of the output image on which the polygons are placed. If None, then the polygons are not projected to any new size (positions on the original image are used). Floats lead to relative size changes, ints to absolute sizes in pixels. extract : None or 'square' or tuple of number or imgaug.BoundingBox or \ imgaug.BoundingBoxesOnImage Subarea to extract from the image. See :func:`imgaug.quokka`. Returns ------- psoi : imgaug.PolygonsOnImage Example polygons on the quokka image. """ # TODO get rid of this deferred import from imgaug.augmentables.polys import Polygon, PolygonsOnImage left, top = 0, 0 if extract is not None: bb_extract = _quokka_normalize_extract(extract) left = bb_extract.x1 top = bb_extract.y1 with open(QUOKKA_ANNOTATIONS_FP, "r") as f: json_dict = json.load(f) polygons = [] for poly_json in json_dict["polygons"]: polygons.append( Polygon([(point["x"] - left, point["y"] - top) for point in poly_json["keypoints"]]) ) if extract is not None: shape = (bb_extract.height, bb_extract.width, 3) else: shape = (643, 960, 3) psoi = PolygonsOnImage(polygons, shape=shape) if size is not None: shape_resized = _compute_resized_shape(shape, size) psoi = psoi.on(shape_resized) return psoi
[ "def", "quokka_polygons", "(", "size", "=", "None", ",", "extract", "=", "None", ")", ":", "# TODO get rid of this deferred import", "from", "imgaug", ".", "augmentables", ".", "polys", "import", "Polygon", ",", "PolygonsOnImage", "left", ",", "top", "=", "0", ...
34.591837
20.265306
def _load_meta(self, size, md5): """Set key attributes to retrived metadata. Might be extended in the future to support more attributes. """ if not hasattr(self, 'local_hashes'): self.local_hashes = {} self.size = int(size) if (re.match('^[a-fA-F0-9]{32}$', md5)): self.md5 = md5
[ "def", "_load_meta", "(", "self", ",", "size", ",", "md5", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'local_hashes'", ")", ":", "self", ".", "local_hashes", "=", "{", "}", "self", ".", "size", "=", "int", "(", "size", ")", "if", "(", "...
31.181818
12.181818
def _set_menu_toggles(self):
    """Enable menu bar view item checkmarks

    Synchronizes each View-menu checkbox with the visibility of its
    corresponding AUI pane (toolbars and panels).
    """
    # (pane object, AUI pane name, translated View-menu label) triples.
    toggles = [
        (self.main_toolbar, "main_window_toolbar", _("Main toolbar")),
        (self.macro_toolbar, "macro_toolbar", _("Macro toolbar")),
        (self.macro_panel, "macro_panel", _("Macro panel")),
        (self.attributes_toolbar, "attributes_toolbar",
         _("Format toolbar")),
        (self.find_toolbar, "find_toolbar", _("Find toolbar")),
        (self.widget_toolbar, "widget_toolbar", _("Widget toolbar")),
        (self.entry_line_panel, "entry_line_panel", _("Entry line")),
        (self.table_list_panel, "table_list_panel", _("Table list")),
    ]

    for toolbar, pane_name, toggle_label in toggles:
        # Get pane from aui manager
        pane = self._mgr.GetPane(pane_name)

        # Get menu item to toggle
        toggle_id = self.menubar.FindMenuItem(_("View"), toggle_label)

        if toggle_id != -1:
            # Check may fail if translation is incomplete
            toggle_item = self.menubar.FindItemById(toggle_id)

            # Adjust toggle to pane visibility
            toggle_item.Check(pane.IsShown())
[ "def", "_set_menu_toggles", "(", "self", ")", ":", "toggles", "=", "[", "(", "self", ".", "main_toolbar", ",", "\"main_window_toolbar\"", ",", "_", "(", "\"Main toolbar\"", ")", ")", ",", "(", "self", ".", "macro_toolbar", ",", "\"macro_toolbar\"", ",", "_",...
44.555556
22.185185
def cache_dir(self, path, saltenv='base', include_empty=False,
              include_pat=None, exclude_pat=None, cachedir=None):
    '''
    Download all of the files in a subdir of the master

    :param path: salt:// path of the directory to cache
    :param saltenv: fileserver environment to pull from
    :param include_empty: also recreate empty directories locally
    :param include_pat: only cache files matching this pattern
    :param exclude_pat: skip files matching this pattern
    :param cachedir: alternative minion cache directory
    :return: list of cached file paths (and created empty dirs)
    '''
    ret = []

    path = self._check_proto(salt.utils.data.decode(path))
    # We want to make sure files start with this *directory*, use
    # '/' explicitly because the master (that's generating the
    # list of files) only runs on POSIX
    if not path.endswith('/'):
        path = path + '/'

    log.info(
        'Caching directory \'%s\' for environment \'%s\'',
        path, saltenv
    )
    # go through the list of all files finding ones that are in
    # the target directory and caching them
    for fn_ in self.file_list(saltenv):
        fn_ = salt.utils.data.decode(fn_)
        if fn_.strip() and fn_.startswith(path):
            if salt.utils.stringutils.check_include_exclude(
                    fn_, include_pat, exclude_pat):
                fn_ = self.cache_file(
                    salt.utils.url.create(fn_), saltenv, cachedir=cachedir)
                if fn_:
                    ret.append(fn_)

    if include_empty:
        # Break up the path into a list containing the bottom-level
        # directory (the one being recursively copied) and the directories
        # preceding it
        # separated = string.rsplit(path, '/', 1)
        # if len(separated) != 2:
        #     # No slashes in path. (So all files in saltenv will be copied)
        #     prefix = ''
        # else:
        #     prefix = separated[0]
        cachedir = self.get_cachedir(cachedir)

        dest = salt.utils.path.join(cachedir, 'files', saltenv)
        for fn_ in self.file_list_emptydirs(saltenv):
            fn_ = salt.utils.data.decode(fn_)
            if fn_.startswith(path):
                minion_dir = '{0}/{1}'.format(dest, fn_)
                if not os.path.isdir(minion_dir):
                    os.makedirs(minion_dir)
                ret.append(minion_dir)
    return ret
[ "def", "cache_dir", "(", "self", ",", "path", ",", "saltenv", "=", "'base'", ",", "include_empty", "=", "False", ",", "include_pat", "=", "None", ",", "exclude_pat", "=", "None", ",", "cachedir", "=", "None", ")", ":", "ret", "=", "[", "]", "path", "...
42.66
19.22
def _flow_check_handler_internal(self):
    """Periodic handler to check if installed flows are present.

    This handler runs periodically to check if installed flows are
    present. This function cannot detect and delete the stale flows,
    if present. It requires more complexity to delete stale flows.
    Generally, stale flows are not present. So, that logic is not
    put here.
    """
    # Snapshot the current flows on both peer ports once, up front.
    integ_flow = self.integ_br_obj.dump_flows_for(
        in_port=self.int_peer_port_num)
    ext_flow = self.ext_br_obj.dump_flows_for(
        in_port=self.phy_peer_port_num)
    for net_uuid, lvm in six.iteritems(self.local_vlan_map):
        vdp_vlan = lvm.any_consistent_vlan()
        flow_required = False
        # NOTE(review): this `return` aborts the scan for ALL remaining
        # networks on the first invalid VDP VLAN — looks like it may
        # have been intended as `continue`; confirm before changing.
        if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
            return
        if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on Integ bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on External bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if flow_required:
            # Re-program the missing flows for this local VLAN mapping.
            LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
                     " %(vdp)s",
                     {'lvid': lvm.lvid, 'vdp': vdp_vlan})
            self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
[ "def", "_flow_check_handler_internal", "(", "self", ")", ":", "integ_flow", "=", "self", ".", "integ_br_obj", ".", "dump_flows_for", "(", "in_port", "=", "self", ".", "int_peer_port_num", ")", "ext_flow", "=", "self", ".", "ext_br_obj", ".", "dump_flows_for", "(...
53.9375
19.53125
def sync_ldap_groups(self, ldap_groups):
    """Synchronize LDAP groups with local group model.

    Iterates ``(cname, attributes)`` pairs, maps LDAP attribute names to
    model fields via ``conf_LDAP_SYNC_GROUP_ATTRIBUTES`` and
    get-or-creates a ``Group`` per entry, updating the sync statistics
    counters as it goes.
    """
    groupname_field = 'name'
    self.stats_group_total = len(ldap_groups)
    for cname, ldap_attributes in ldap_groups:
        defaults = {}
        try:
            # Map raw LDAP attributes (byte lists) onto model fields.
            for name, attribute in ldap_attributes.items():
                defaults[self.conf_LDAP_SYNC_GROUP_ATTRIBUTES[name]] = attribute[0].decode('utf-8')
        except AttributeError:
            # In some cases attrs is a list instead of a dict; skip these invalid groups
            continue

        try:
            groupname = defaults[groupname_field]
        except KeyError:
            logger.warning("Group is missing a required attribute '%s'" % groupname_field)
            self.stats_group_errors += 1
            continue

        # Case-insensitive lookup on the group name; create if missing.
        kwargs = {
            groupname_field + '__iexact': groupname,
            'defaults': defaults,
        }
        try:
            group, created = Group.objects.get_or_create(**kwargs)
        except (IntegrityError, DataError) as e:
            logger.error("Error creating group %s: %s" % (groupname, e))
            self.stats_group_errors += 1
        else:
            if created:
                self.stats_group_added += 1
                logger.debug("Created group %s" % groupname)

    logger.info("Groups are synchronized")
[ "def", "sync_ldap_groups", "(", "self", ",", "ldap_groups", ")", ":", "groupname_field", "=", "'name'", "self", ".", "stats_group_total", "=", "len", "(", "ldap_groups", ")", "for", "cname", ",", "ldap_attributes", "in", "ldap_groups", ":", "defaults", "=", "{...
39.861111
20.888889
def calcHairpin(seq, mv_conc=50.0, dv_conc=0.0, dntp_conc=0.8, dna_conc=50.0,
                temp_c=37, max_loop=30):
    ''' Calculate the hairpin formation thermodynamics of a DNA
    sequence.

    **Note that the maximum length of `seq` is 60 bp.** This is a cap
    suggested by the Primer3 team as the longest reasonable sequence length
    for which a two-state NN model produces reliable results (see
    primer3/src/libnano/thal.h:50).

    Args:
        seq (str): DNA sequence to analyze for hairpin formation

        mv_conc (float/int, optional): Monovalent cation conc. (mM)
        dv_conc (float/int, optional): Divalent cation conc. (mM)
        dntp_conc (float/int, optional): dNTP conc. (mM)
        dna_conc (float/int, optional): DNA conc. (nM)
        temp_c (int, optional): Simulation temperature for dG (Celsius)
        max_loop(int, optional): Maximum size of loops in the structure

    Returns:
        A `ThermoResult` object with thermodynamic characteristics of the
        hairpin formation.

    Raises:
        ``RuntimeError``

    '''
    # CAUTION: **locals() forwards every argument by name, so no local
    # variables may be introduced before this call.
    _setThermoArgs(**locals())
    return _THERMO_ANALYSIS.calcHairpin(seq).checkExc()
[ "def", "calcHairpin", "(", "seq", ",", "mv_conc", "=", "50.0", ",", "dv_conc", "=", "0.0", ",", "dntp_conc", "=", "0.8", ",", "dna_conc", "=", "50.0", ",", "temp_c", "=", "37", ",", "max_loop", "=", "30", ")", ":", "_setThermoArgs", "(", "*", "*", ...
40.214286
28.857143
def get_neighbors_in_shell(self, origin, r, dr, include_index=False,
                           include_image=False):
    """
    Returns all sites in a shell centered on origin (coords) between radii
    r-dr and r+dr.

    Args:
        origin (3x1 array): Cartesian coordinates of center of sphere.
        r (float): Inner radius of shell.
        dr (float): Width of shell.
        include_index (bool): Whether to include the non-supercell site
            in the returned data
        include_image (bool): Whether to include the supercell image
            in the returned data

    Returns:
        [(site, dist, index) ...] since most of the time, subsequent
        processing requires the distance. Index only supplied if
        include_index = True. The index is the index of the site in the
        original (non-supercell) structure. This is needed for
        ewaldmatrix by keeping track of which sites contribute to the
        ewald sum. Image only supplied if include_image = True
    """
    # Query the full sphere of radius r+dr, then discard everything at
    # or inside the inner radius r-dr.
    candidates = self.get_sites_in_sphere(origin, r + dr,
                                          include_index=include_index,
                                          include_image=include_image)
    min_dist = r - dr
    return [entry for entry in candidates if entry[1] > min_dist]
[ "def", "get_neighbors_in_shell", "(", "self", ",", "origin", ",", "r", ",", "dr", ",", "include_index", "=", "False", ",", "include_image", "=", "False", ")", ":", "outer", "=", "self", ".", "get_sites_in_sphere", "(", "origin", ",", "r", "+", "dr", ",",...
46.928571
22.857143
def update_model(self):
    ''' a method to update model with latest training data

    :return: True
    '''
    import requests

    # Trigger a recalculation for this group on the public endpoint.
    calculate_url = self.endpoint_public + '/calculate'
    response = requests.get(calculate_url, params={'group': self.group_name})
    return response.json()['success']
[ "def", "update_model", "(", "self", ")", ":", "import", "requests", "url", "=", "self", ".", "endpoint_public", "+", "'/calculate'", "params", "=", "{", "'group'", ":", "self", ".", "group_name", "}", "response", "=", "requests", ".", "get", "(", "url", ...
25.647059
19.882353
def police_priority_map_exceed_map_pri7_exceed(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML config for the police-priority-map
    exceed/map-pri7-exceed leaf and dispatches it via the callback.
    Expects 'name' and 'map_pri7_exceed' in kwargs; an alternative
    'callback' may override the default RPC sender.
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    name_key.text = kwargs.pop('name')
    exceed = ET.SubElement(police_priority_map, "exceed")
    map_pri7_exceed = ET.SubElement(exceed, "map-pri7-exceed")
    map_pri7_exceed.text = kwargs.pop('map_pri7_exceed')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "police_priority_map_exceed_map_pri7_exceed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ...
49.153846
20.153846
def do_run(self, arg):
    """run [args...]
    Restart the debugged python program. If a string is supplied it is
    splitted with "shlex", and the result is used as the new sys.argv.
    History, breakpoints, actions and debugger options are preserved.
    "restart" is an alias for "run".
    """
    if arg:
        # Keep the original program name; replace the remaining argv
        # with the shlex-split argument string.
        program = sys.argv[0:1]
        sys.argv = program + shlex.split(arg)
    # this is caught in the main debugger loop
    raise Restart
[ "def", "do_run", "(", "self", ",", "arg", ")", ":", "if", "arg", ":", "argv0", "=", "sys", ".", "argv", "[", "0", ":", "1", "]", "sys", ".", "argv", "=", "shlex", ".", "split", "(", "arg", ")", "sys", ".", "argv", "[", ":", "0", "]", "=", ...
39.230769
14.692308
def build_spotify_api():
    """Build the Spotify API for future use

    Reads the 'spotify_client_id' and 'spotify_client_secret' keys from
    the bot's data store and initialises the module-level ``spclient``.
    Returns True on success, False when keys are missing or the client
    could not be constructed.
    """
    data = datatools.get_data()
    if "spotify_client_id" not in data["discord"]["keys"]:
        logger.warning("No API key found with name 'spotify_client_id'")
        logger.info("Please add your Spotify client id with name 'spotify_client_id' "
                    "in data.json to use Spotify features of the music module")
        return False
    if "spotify_client_secret" not in data["discord"]["keys"]:
        logger.warning("No API key found with name 'spotify_client_secret'")
        logger.info("Please add your Spotify client secret with name 'spotify_client_secret' "
                    "in data.json to use Spotify features of the music module")
        return False

    try:
        # Client-credentials flow: no user login, app-level access only.
        global spclient
        client_credentials_manager = SpotifyClientCredentials(
            data["discord"]["keys"]["spotify_client_id"],
            data["discord"]["keys"]["spotify_client_secret"])
        spclient = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
        logger.debug("Spotify build successful")
        return True
    except Exception as e:
        logger.exception(e)
        return False
[ "def", "build_spotify_api", "(", ")", ":", "data", "=", "datatools", ".", "get_data", "(", ")", "if", "\"spotify_client_id\"", "not", "in", "data", "[", "\"discord\"", "]", "[", "\"keys\"", "]", ":", "logger", ".", "warning", "(", "\"No API key found with name...
47.8
25.32
def cli(env, identifier):
    """List server credentials."""
    manager = SoftLayer.HardwareManager(env.client)
    # Accept either a numeric id or a hostname for the hardware.
    hardware_id = helpers.resolve_id(manager.resolve_ids,
                                     identifier,
                                     'hardware')
    instance = manager.get_hardware(hardware_id)
    table = formatting.Table(['username', 'password'])
    # Each installed software component may carry credential entries.
    for item in instance['softwareComponents']:
        if 'passwords' not in item:
            raise exceptions.SoftLayerError("No passwords found in softwareComponents")
        for credentials in item['passwords']:
            table.add_row([credentials.get('username', 'None'),
                           credentials.get('password', 'None')])
    env.fout(table)
[ "def", "cli", "(", "env", ",", "identifier", ")", ":", "manager", "=", "SoftLayer", ".", "HardwareManager", "(", "env", ".", "client", ")", "hardware_id", "=", "helpers", ".", "resolve_id", "(", "manager", ".", "resolve_ids", ",", "identifier", ",", "'hard...
43.6875
19.1875
def hostname(self, levels=1):
    """
    Produce a hostname with specified number of subdomain levels.

    >>> hostname()
    db-01.nichols-phillips.com
    >>> hostname(0)
    laptop-56
    >>> hostname(2)
    web-12.williamson-hopkins.jackson.com
    """
    # Build the bare host part once (prefix plus two random digits).
    host = self.random_element(self.hostname_prefixes) + '-' + self.numerify('##')
    if levels < 1:
        return host
    return host + '.' + self.domain_name(levels)
[ "def", "hostname", "(", "self", ",", "levels", "=", "1", ")", ":", "if", "levels", "<", "1", ":", "return", "self", ".", "random_element", "(", "self", ".", "hostname_prefixes", ")", "+", "'-'", "+", "self", ".", "numerify", "(", "'##'", ")", "return...
36.785714
22.357143
def describe_listeners(load_balancer_arn=None, listener_arns=None, client=None):
    """
    Permission: elasticloadbalancing:DescribeListeners
    """
    # Only forward the filters that were actually provided.
    kwargs = {}
    if load_balancer_arn:
        kwargs['LoadBalancerArn'] = load_balancer_arn
    if listener_arns:
        kwargs['ListenerArns'] = listener_arns
    return client.describe_listeners(**kwargs)
[ "def", "describe_listeners", "(", "load_balancer_arn", "=", "None", ",", "listener_arns", "=", "None", ",", "client", "=", "None", ")", ":", "kwargs", "=", "dict", "(", ")", "if", "load_balancer_arn", ":", "kwargs", ".", "update", "(", "dict", "(", "LoadBa...
37.6
15.2
def send(self, data, flags=0):
    """Send data to the socket. The socket must be connected to a remote
    socket.

    :param data: payload to transmit on the logical link.
    :param flags: optional send flags forwarded to the LLC layer.
    :returns: boolean success indicator; a false value is typically an
        indication that the socket or connection was closed.
    """
    # Delegate to the logical link control layer, passing this socket's
    # transmission control object.
    return self.llc.send(self._tco, data, flags)
[ "def", "send", "(", "self", ",", "data", ",", "flags", "=", "0", ")", ":", "return", "self", ".", "llc", ".", "send", "(", "self", ".", "_tco", ",", "data", ",", "flags", ")" ]
42.5
14.125
def cli(env, identifier):
    """Cancel all virtual guests of the dedicated host immediately.

    Use the 'slcli vs cancel' command to cancel a specific guest.
    """
    manager = SoftLayer.DedicatedHostManager(env.client)
    resolved_id = helpers.resolve_id(manager.resolve_ids, identifier, 'dedicated host')

    # Bail out unless confirmation was given (or skipped globally).
    if not (env.skip_confirmations or formatting.no_going_back(resolved_id)):
        raise exceptions.CLIAbort('Aborted')

    guests = manager.cancel_guests(resolved_id)
    if not guests:
        click.secho('There is not any guest into the dedicated host %s' % resolved_id, fg='red')
        return

    output = formatting.Table(['id', 'server name', 'status'])
    for guest in guests:
        output.add_row([guest['id'], guest['fqdn'], guest['status']])
    env.fout(output)
[ "def", "cli", "(", "env", ",", "identifier", ")", ":", "dh_mgr", "=", "SoftLayer", ".", "DedicatedHostManager", "(", "env", ".", "client", ")", "host_id", "=", "helpers", ".", "resolve_id", "(", "dh_mgr", ".", "resolve_ids", ",", "identifier", ",", "'dedic...
29.428571
24.428571
def parse(cls, rule_string):
    """Parse a textual rule into a list of rule objects.

    A single line may yield multiple rules because each (from_port,
    to_port) pair in the parse result produces its own rule.

    :param rule_string: textual rule to parse.
    :returns: list of rule instances, one per port range.
    """
    result = parser.parseString(rule_string)

    # Keyword arguments shared by every rule produced from this line.
    kwargs = {
        'address': result.ip_and_mask or None,
        'group': result.security_group or None,
        'group_name': result.group_name or None,
    }

    # Break out port ranges into multiple rules.  Construct via ``cls``
    # (not the hard-coded Rule) so subclasses get instances of themselves.
    return [cls(result.protocol, from_port, to_port, **kwargs)
            for from_port, to_port in result.ports]
[ "def", "parse", "(", "cls", ",", "rule_string", ")", ":", "result", "=", "parser", ".", "parseString", "(", "rule_string", ")", "rules", "=", "[", "]", "# breakout port ranges into multple rules", "kwargs", "=", "{", "}", "kwargs", "[", "'address'", "]", "="...
29.368421
16.105263
def clean(ctx, check, compiled_only, all_matches, force, verbose):
    """Removes a project's build artifacts.

    If `check` is not specified, the current working directory will be used.

    All `*.pyc`/`*.pyd`/`*.pyo`/`*.whl` files and `__pycache__` directories will be removed.
    Additionally, the following patterns will be removed from the root of the path:
    `.cache`, `.coverage`, `.eggs`, `.pytest_cache`, `.tox`, `build`, `dist`, and `*.egg-info`.
    """
    force_clean_root = False
    if check:
        # An explicit check name is resolved relative to the configured repo root.
        path = resolve_path(os.path.join(get_root(), check))
        if not dir_exists(path):
            abort(
                'Directory `{}` does not exist. Be sure to `ddev config set {repo} '
                'path/to/integrations-{repo}`.'.format(path, repo=ctx.obj['repo_choice'])
            )
    else:
        path = os.getcwd()
        # Cleaning from the repo root is destructive, so require --force or
        # an interactive confirmation before touching root-level artifacts.
        if basepath(path) in ('integrations-core', 'integrations-extras'):
            if force:
                force_clean_root = True
            else:
                echo_warning(
                    'You are running this from the root of the integrations project. '
                    'Should we remove everything, including: '
                    '.cache, .coverage, .eggs, .pytest_cache, .tox, build, dist, and *.egg-info? '
                    'You can also use --force or -f to bypass this input.'
                )
                force_clean_root = click.confirm('Do you want to continue?')

    echo_waiting('Cleaning `{}`...'.format(path))
    if compiled_only:
        # Only *.pyc/*.pyd/*.pyo and __pycache__ — leave packaging artifacts alone.
        removed_paths = remove_compiled_scripts(path, detect_project=not all_matches)
    else:
        removed_paths = clean_package(path, detect_project=not all_matches, force_clean_root=force_clean_root)

    if verbose:
        if removed_paths:
            echo_success('Removed paths:')
            for p in removed_paths:
                echo_info(' {}'.format(p))

    if removed_paths:
        echo_success('Cleaned!')
    else:
        echo_success('Already clean!')
[ "def", "clean", "(", "ctx", ",", "check", ",", "compiled_only", ",", "all_matches", ",", "force", ",", "verbose", ")", ":", "force_clean_root", "=", "False", "if", "check", ":", "path", "=", "resolve_path", "(", "os", ".", "path", ".", "join", "(", "ge...
40.122449
26.204082
def get_conf_from_module(mod):
    """Return the configuration held by *mod*, with defaults applied.

    The module may expose its settings either directly on itself or via a
    ``default`` attribute; both layouts are supported.
    """
    config = ModuleConfig(CONF_SPEC)
    module = _get_correct_module(mod)
    config.set_module(module)
    # Prefer the module's ``default`` object as the settings source.
    source = module.default if hasattr(module, 'default') else module
    return extract_conf_from(source, config)
[ "def", "get_conf_from_module", "(", "mod", ")", ":", "conf", "=", "ModuleConfig", "(", "CONF_SPEC", ")", "# get imported module", "mod", "=", "_get_correct_module", "(", "mod", ")", "conf", ".", "set_module", "(", "mod", ")", "# extarct from default object or from m...
23.894737
18.263158
def split_user_input(line, pattern=None):
    """Split user input into initial whitespace, escape character, function
    part and the rest.

    :param line: raw input line; coerced to unicode using stdin's encoding.
    :param pattern: optional compiled regex overriding the default splitter.
    :returns: tuple ``(pre, esc, ifun, the_rest)`` — leading whitespace,
        escape character (possibly empty), the function part (stripped),
        and the remaining text (left-stripped).
    """
    # We need to ensure that the rest of this routine deals only with unicode
    encoding = get_stream_enc(sys.stdin, 'utf-8')
    line = py3compat.cast_unicode(line, encoding)

    if pattern is None:
        pattern = line_split
    match = pattern.match(line)
    if not match:
        # Pattern did not match: fall back to a plain whitespace split.
        try:
            ifun, the_rest = line.split(None, 1)
        except ValueError:
            ifun, the_rest = line, u''
        # Raw string for the regex: avoids the invalid '\s' escape warning.
        pre = re.match(r'^(\s*)(.*)', line).groups()[0]
        esc = ""
    else:
        pre, esc, ifun, the_rest = match.groups()

    return pre, esc or '', ifun.strip(), the_rest.lstrip()
[ "def", "split_user_input", "(", "line", ",", "pattern", "=", "None", ")", ":", "# We need to ensure that the rest of this routine deals only with unicode", "encoding", "=", "get_stream_enc", "(", "sys", ".", "stdin", ",", "'utf-8'", ")", "line", "=", "py3compat", ".",...
36.346154
16.692308
def editorData(self, editor):
    """
    Pulls the value from the inputed editor.

    :param      editor | <QWidget>

    :return     <variant>
    """
    # Multi-tag editors expose their full tag list.
    if isinstance(editor, XMultiTagEdit):
        return editor.tags()
    # Combo boxes report the currently selected text.
    if isinstance(editor, QComboBox):
        return nativestring(editor.currentText())
    # Plain line edits report their raw text.
    if isinstance(editor, QLineEdit):
        return nativestring(editor.text())
    return None
[ "def", "editorData", "(", "self", ",", "editor", ")", ":", "# set the information from a multi-tag edit\r", "if", "(", "isinstance", "(", "editor", ",", "XMultiTagEdit", ")", ")", ":", "return", "editor", ".", "tags", "(", ")", "# set the information from a combo bo...
31.619048
13.52381
def interpolate_to_timestep(self, timestep, cumulative=None):
    """Interpolate data for a finer timestep using a linear interpolation.

    Args:
        timestep: Target timestep as an integer. Target timestep must be
            divisable by current timestep.
        cumulative: A boolean that sets whether the interpolation should
            treat the data colection values as cumulative, in which case the
            value at each timestep is the value over that timestep (instead
            of over the hour). The default will check the DataType to see if
            this type of data is typically cumulative over time.

    Return:
        A continuous hourly data collection with data interpolated to the
        input timestep.
    """
    assert timestep % self.header.analysis_period.timestep == 0, \
        'Target timestep({}) must be divisable by current timestep({})' \
        .format(timestep, self.header.analysis_period.timestep)
    if cumulative is not None:
        assert isinstance(cumulative, bool), \
            'Expected Boolean. Got {}'.format(type(cumulative))

    # generate new data; the modulo wraps the last value around to the first
    # so the collection interpolates as a continuous annual cycle
    _new_values = []
    _data_length = len(self._values)
    for d in xrange(_data_length):
        for _v in self._xxrange(self[d], self[(d + 1) % _data_length], timestep):
            _new_values.append(_v)

    # divide cumulative values by the timestep so each value covers one step
    native_cumulative = self.header.data_type.cumulative
    if cumulative is True or (cumulative is None and native_cumulative):
        for i, d in enumerate(_new_values):
            _new_values[i] = d / timestep

    # shift data by a half-hour if data is averaged or cumulative over an hour
    if self.header.data_type.point_in_time is False:
        shift_dist = int(timestep / 2)
        _new_values = _new_values[-shift_dist:] + _new_values[:-shift_dist]

    # build a new header carrying the finer timestep in its analysis period
    a_per = self.header.analysis_period
    _new_a_per = AnalysisPeriod(a_per.st_month, a_per.st_day, a_per.st_hour,
                                a_per.end_month, a_per.end_day, a_per.end_hour,
                                timestep, a_per.is_leap_year)
    _new_header = self.header.duplicate()
    _new_header._analysis_period = _new_a_per
    return HourlyContinuousCollection(_new_header, _new_values)
[ "def", "interpolate_to_timestep", "(", "self", ",", "timestep", ",", "cumulative", "=", "None", ")", ":", "assert", "timestep", "%", "self", ".", "header", ".", "analysis_period", ".", "timestep", "==", "0", ",", "'Target timestep({}) must be divisable by current ti...
48.4
22.26
def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
    '''Create a media with custom callbacks to read the data from.
    @param instance: LibVLC instance.
    @param open_cb: callback to open the custom bitstream input media.
    @param read_cb: callback to read data (must not be NULL).
    @param seek_cb: callback to seek, or NULL if seeking is not supported.
    @param close_cb: callback to close the media, or NULL if unnecessary.
    @param opaque: data pointer for the open callback.
    @return: the newly created media or NULL on error
    @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown.
    @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances.
    @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
    @version: LibVLC 3.0.0 and later.
    '''
    # Look up the cached ctypes binding for the native symbol, creating it
    # on first use (all six arguments are plain input parameters).
    f = _Cfunctions.get('libvlc_media_new_callbacks', None) or \
        _Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
                    ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb, MediaCloseCb, ctypes.c_void_p)
    return f(instance, open_cb, read_cb, seek_cb, close_cb, opaque)
[ "def", "libvlc_media_new_callbacks", "(", "instance", ",", "open_cb", ",", "read_cb", ",", "seek_cb", ",", "close_cb", ",", "opaque", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_new_callbacks'", ",", "None", ")", "or", "_Cfunction", "(",...
98.2
63.4
def seek(self, value): """ Seek to the specified time. Parameters ---------- value : str or int The time to seek to. Can be any of the following formats: >>> 15.4 -> 15.4 # seconds >>> (1,21.5) -> 81.5 # (min,sec) >>> (1,1,2) -> 3662 # (hr, min, sec) >>> '01:01:33.5' -> 3693.5 #(hr,min,sec) >>> '01:01:33.045' -> 3693.045 >>> '01:01:33,5' #comma works too """ # Pause the stream self.pause() # Make sure the movie starts at 1s as 0s gives trouble. self.clock.time = max(0.5, value) logger.debug("Seeking to {} seconds; frame {}".format(self.clock.time, self.clock.current_frame)) if self.audioformat: self.__calculate_audio_frames() # Resume the stream self.pause()
[ "def", "seek", "(", "self", ",", "value", ")", ":", "# Pause the stream", "self", ".", "pause", "(", ")", "# Make sure the movie starts at 1s as 0s gives trouble.", "self", ".", "clock", ".", "time", "=", "max", "(", "0.5", ",", "value", ")", "logger", ".", ...
28.68
15.96
def unselect(self, value=None, field=None, **kwargs):
    """ Find a select box on the page and unselect a particular option from it.

    If the select box is a multiple select, ``unselect`` can be called
    multiple times to unselect more than one option. The select box can be
    found via its name, id, or label text. ::

        page.unselect("March", field="Month")

    Args:
        value (str, optional): Which option to unselect.
        field (str, optional): The id, name, or label of the select box.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
    """
    if field:
        # Locate the select box first, then the option inside it.
        select_box = self.find("select", field, **kwargs)
        select_box.find("option", value, **kwargs).unselect_option()
    else:
        self.find("option", value, **kwargs).unselect_option()
[ "def", "unselect", "(", "self", ",", "value", "=", "None", ",", "field", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "field", ":", "self", ".", "find", "(", "\"select\"", ",", "field", ",", "*", "*", "kwargs", ")", ".", "find", "(", "...
45.388889
30.5
def add_key_value(self, key, value):
    """Store *value* under *key* as a data field.

    The special key ``unique_id`` is kept (stringified) directly on the
    instance instead of in the data mapping.

    Args:
        key: field name.
        value: field value.
    """
    if key != 'unique_id':
        self._data[key] = value
    else:
        self._unique_id = str(value)
[ "def", "add_key_value", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "==", "'unique_id'", ":", "self", ".", "_unique_id", "=", "str", "(", "value", ")", "else", ":", "self", ".", "_data", "[", "key", "]", "=", "value" ]
23.166667
14.5
def cancelPendingResultsFor(self, ps):
    """Cancel all pending results for the given parameters. Note that
    this only affects the notebook's record, not any job running in a lab.

    :param ps: the parameters"""
    key = self._parametersAsIndex(ps)
    if key not in self._results:
        return

    entries = self._results[key]
    # Completed results are stored as dicts; anything else is a pending job id.
    finished = [entry for entry in entries if isinstance(entry, dict)]
    jobs = [entry for entry in entries if not isinstance(entry, dict)]
    # Keep only the completed results...
    self._results[key] = finished
    # ...and drop the cancelled jobs from the pending list.
    for job in jobs:
        del self._pending[job]
[ "def", "cancelPendingResultsFor", "(", "self", ",", "ps", ")", ":", "k", "=", "self", ".", "_parametersAsIndex", "(", "ps", ")", "if", "k", "in", "self", ".", "_results", ".", "keys", "(", ")", ":", "# remove from results", "rs", "=", "self", ".", "_re...
37.9375
15.875
def trackSeek(path, artist, album, track, trackNum, fmt):
    """Split the audio file at *path* into a named track plus a hidden track.

    Finds the silence gap in the song, splits on it, saves both pieces with
    the given artist/album/track metadata, and returns the two output file
    names as ``[trackName, hiddenName]``.
    """
    # Output file names: "<track>.<fmt>" and "(Hidden Track).<fmt>".
    hiddenName = "(Hidden Track).{}".format(fmt)
    trackName = track + ".{}".format(fmt)
    songIn = instantiateSong(path)
    # times[0]/times[1] bound the silence gap separating the two tracks
    # (presumably start/end offsets from findGap — TODO confirm).
    times = findGap(songIn)
    saveFiles(trackName, hiddenName, splitSong(songIn, times[0], times[1]), artist, album, trackNum)
    # return [path, track.rsplit('/',1)[0] +'/{}'.format(hiddenName)]
    return [trackName, hiddenName]
[ "def", "trackSeek", "(", "path", ",", "artist", ",", "album", ",", "track", ",", "trackNum", ",", "fmt", ")", ":", "hiddenName", "=", "\"(Hidden Track).{}\"", ".", "format", "(", "fmt", ")", "trackName", "=", "track", "+", "\".{}\"", ".", "format", "(", ...
49.444444
15.555556
def perform(self, command, params=None, **kwargs):
    """Execute a command.

    Arguments can be supplied either as a dictionary or as keyword
    arguments, e.g.::

        stc.perform('LoadFromXml', {'filename': 'config.xml'})
        stc.perform('LoadFromXml', filename='config.xml')

    Arguments:
    command -- Command to execute.
    params  -- Optional. Dictionary of parameters (name-value pairs).
    kwargs  -- Optional keyword arguments (name=value pairs).

    Return:
    Data from command.
    """
    self._check_session()
    # Merge keyword arguments into the (possibly caller-supplied) dict.
    params = params or {}
    params.update(kwargs)
    params['command'] = command
    _status, data = self._rest.post_request('perform', None, params)
    return data
[ "def", "perform", "(", "self", ",", "command", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_session", "(", ")", "if", "not", "params", ":", "params", "=", "{", "}", "if", "kwargs", ":", "params", ".", "update"...
32.04
20.44
def html(self):
    """Gives an html representation of the assessment."""
    # Concatenate preamble, body and postamble in order.
    parts = [self.html_preamble, self._repr_html_(), self.html_post]
    return ''.join(parts)
[ "def", "html", "(", "self", ")", ":", "output", "=", "self", ".", "html_preamble", "output", "+=", "self", ".", "_repr_html_", "(", ")", "output", "+=", "self", ".", "html_post", "return", "output" ]
33.333333
10.166667
def gdcsreporter(self, analysistype='GDCS'):
    """
    Creates a report of the GDCS results
    :param analysistype: The variable to use when accessing attributes in the metadata object
    """
    logging.info('Creating {} report'.format(analysistype))
    # Initialise list to store all the GDCS genes, and genera in the analysis
    gdcs = list()
    genera = list()
    # First pass: decide per-sample whether a report row can be produced,
    # and collect the union of GDCS genes and the set of genera observed.
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            if os.path.isdir(sample[analysistype].targetpath):
                # Update the fai dict with all the genes in the analysis, rather than just those with baited hits
                self.gdcs_fai(sample)
                sample[analysistype].createreport = True
                # Determine which genera are present in the analysis
                if sample.general.closestrefseqgenus not in genera:
                    genera.append(sample.general.closestrefseqgenus)
                try:
                    # Add all the GDCS genes to the list
                    for gene in sorted(sample[analysistype].faidict):
                        if gene not in gdcs:
                            gdcs.append(gene)
                except AttributeError:
                    sample[analysistype].createreport = False
            else:
                sample[analysistype].createreport = False
        else:
            sample[analysistype].createreport = False
            sample.general.incomplete = True
    header = 'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\n'.format(','.join(gdcs))
    data = str()
    with open(os.path.join(self.reportpath, '{}.csv'.format(analysistype)), 'w') as report:
        # Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus
        # will be grouped together in the report
        for genus in genera:
            for sample in self.runmetadata.samples:
                if sample.general.closestrefseqgenus == genus:
                    if sample[analysistype].createreport:
                        sample[analysistype].totaldepth = list()
                        # Add the sample to the report if it matches the current genus
                        # if genus == sample.general.closestrefseqgenus:
                        data += '{},{},'.format(sample.name, genus)
                        # Initialise a variable to store the number of GDCS genes were matched
                        count = 0
                        # As I want the count to be in the report before all the gene results, this string will
                        # store the specific sample information, and will be added to data once count is known
                        specific = str()
                        for gene in gdcs:
                            # As there are different genes present in the GDCS databases for each organism of
                            # interest, genes that did not match because they're absent in the specific database are
                            # indicated using an X
                            if gene not in [result for result in sample[analysistype].faidict]:
                                specific += 'X,'
                            else:
                                try:
                                    # Report the necessary information for each gene result
                                    identity = sample[analysistype].results[gene]
                                    specific += '{}% ({} +/- {}),'\
                                        .format(identity,
                                                sample[analysistype].avgdepth[gene],
                                                sample[analysistype].standarddev[gene])
                                    sample[analysistype].totaldepth.append(
                                        float(sample[analysistype].avgdepth[gene]))
                                    count += 1
                                # If the gene was missing from the results attribute, add a - to the cell
                                except (KeyError, AttributeError):
                                    sample.general.incomplete = True
                                    specific += '-,'
                        # Calculate the mean depth of the genes and the standard deviation
                        sample[analysistype].mean = numpy.mean(sample[analysistype].totaldepth)
                        sample[analysistype].stddev = numpy.std(sample[analysistype].totaldepth)
                        # Determine whether the sample pass the necessary quality criteria:
                        # Pass, all GDCS, mean coverage greater than 20X coverage;
                        # ?: Indeterminate value;
                        # -: Fail value
                        # Allow one missing GDCS to still be considered a pass
                        if count >= len(sample[analysistype].faidict) - 1:
                            if sample[analysistype].mean > 20:
                                quality = '+'
                            else:
                                quality = '?'
                                sample.general.incomplete = True
                        else:
                            quality = '-'
                            sample.general.incomplete = True
                        # Add the count, mean depth with standard deviation, the pass/fail determination,
                        # and the total number of GDCS genes as well as the results
                        data += '{hits}/{total},{mean} +/- {std},{fail},{gdcs}\n'\
                            .format(hits=str(count),
                                    total=len(sample[analysistype].faidict),
                                    mean='{:.2f}'.format(sample[analysistype].mean),
                                    std='{:.2f}'.format(sample[analysistype].stddev),
                                    fail=quality,
                                    gdcs=specific)
                    # # Any samples with a best assembly of 'NA' are considered incomplete.
                    # else:
                    #     data += '{},{},,,-\n'.format(sample.name, sample.general.closestrefseqgenus)
                    #     sample.general.incomplete = True
                    elif sample.general.closestrefseqgenus == 'NA':
                        data += '{}\n'.format(sample.name)
                        sample.general.incomplete = True
        # Write the header and data to file
        report.write(header)
        report.write(data)
[ "def", "gdcsreporter", "(", "self", ",", "analysistype", "=", "'GDCS'", ")", ":", "logging", ".", "info", "(", "'Creating {} report'", ".", "format", "(", "analysistype", ")", ")", "# Initialise list to store all the GDCS genes, and genera in the analysis", "gdcs", "=",...
65.5
29.230769
def extract_HBS_learning_curves(runs):
    """Build hyperband learning curves from the runs of one configuration.

    This is an example function showing the interface expected by the
    ``HB_result.get_learning_curves`` method.

    Parameters
    ----------
    runs: list of HB_result.run objects
        the performed runs for an unspecified config

    Returns
    -------
    list of learning curves: list of lists of tuples
        An individual learning curve is a list of (t, x_t) tuples; this
        function returns a single curve built from the budget-ordered,
        non-None losses.
    """
    ordered = sorted(runs, key=lambda run: run.budget)
    curve = [(run.budget, run.loss) for run in ordered if run.loss is not None]
    return [curve]
[ "def", "extract_HBS_learning_curves", "(", "runs", ")", ":", "sr", "=", "sorted", "(", "runs", ",", "key", "=", "lambda", "r", ":", "r", ".", "budget", ")", "lc", "=", "list", "(", "filter", "(", "lambda", "t", ":", "not", "t", "[", "1", "]", "is...
28.777778
20.777778
def _format_array(x, fmt): # type: (Any, str) -> str """ >>> _format_array([0, 1.0], "{:0.3f}") '[0.000, 1.000]' """ value_repr = ", ".join(fmt.format(v) for v in x) return "[{}]".format(value_repr)
[ "def", "_format_array", "(", "x", ",", "fmt", ")", ":", "# type: (Any, str) -> str", "value_repr", "=", "\", \"", ".", "join", "(", "fmt", ".", "format", "(", "v", ")", "for", "v", "in", "x", ")", "return", "\"[{}]\"", ".", "format", "(", "value_repr", ...
27.375
7.875
def read_gtf_line(cols, field="name"):
    """Parse one GTF line (already split into columns) to get class/name info.

    :param cols: list of the 9 tab-separated GTF columns.
    :param field: attribute-key suffix used to pick the feature name; falls
        back to ``gene_id`` and finally to ``"None"``.
    :returns: ``[chrom, start, end, strand, group, name]`` where *group* is
        the biotype when one is present, otherwise the feature-type column.
    :raises: re-raises any parsing error after logging format hints.
    """
    field = field.lower()
    try:
        group = cols[2]
        attrs = cols[8].split(";")

        def _values(suffix):
            # Values of all attributes whose key ends with ``suffix``.
            return [attr.strip().split(" ")[1] for attr in attrs
                    if attr.strip().split(" ")[0].lower().endswith(suffix)]

        # Name lookup: requested field first, then gene_id, then a placeholder.
        name = _values(field) or _values("gene_id") or ["None"]
        biotype = _values("biotype")
        if biotype:
            group = biotype[0]
        c = cols[0]
        s = int(cols[3])
        e = int(cols[4])
        st = cols[6]
        return [c, s, e, st, group, name[0]]
    # ``except(Exception, e)`` was Python-2 syntax; in Python 3 it raised a
    # NameError instead of binding the exception.
    except Exception as e:
        logger.error(cols)
        logger.error("File is not in correct format")
        logger.error("Expect chr source feature start end . strand attributes")
        logger.error("Attributes are 'gene_name SNCA; gene_id ENSG; '")
        logger.error("The 3rd column is used as type of small RNA (like miRNA)")
        logger.error("at least should contains '; *name NAME; '")
        logger.error(e)
        raise
[ "def", "read_gtf_line", "(", "cols", ",", "field", "=", "\"name\"", ")", ":", "field", "=", "field", ".", "lower", "(", ")", "try", ":", "group", "=", "cols", "[", "2", "]", "attrs", "=", "cols", "[", "8", "]", ".", "split", "(", "\";\"", ")", ...
43.714286
24.785714
def protein_map_from_twg(twg):
    """Build map of entity texts to validate protein grounding.

    Looks at the grounding of the entity texts extracted from the statements
    and finds proteins where there is grounding to a human protein that maps
    to an HGNC name that is an exact match to the entity text. Returns a dict
    that can be used to update/expand the grounding map.

    Parameters
    ----------
    twg : list of tuple
        list of tuples of the form output by agent_texts_with_grounding

    Returns
    -------
    protein_map : dict
        dict keyed on agent text with associated values
        {'TEXT': agent_text, 'UP': uniprot_id}. Entries are for agent texts
        where the grounding map was able to find human protein grounded to
        this agent_text in Uniprot.
    """
    protein_map = {}
    unmatched = 0
    matched = 0
    logger.info('Building grounding map for human proteins')
    for agent_text, grounding_list, _ in twg:
        # If 'UP' (Uniprot) not one of the grounding entries for this text,
        # then we skip it.
        if 'UP' not in [entry[0] for entry in grounding_list]:
            continue
        # Otherwise, collect all the Uniprot IDs for this protein.
        uniprot_ids = [entry[1] for entry in grounding_list
                       if entry[0] == 'UP']
        # For each Uniprot ID, look up the species
        for uniprot_id in uniprot_ids:
            # If it's not a human protein, skip it
            mnemonic = uniprot_client.get_mnemonic(uniprot_id)
            if mnemonic is None or not mnemonic.endswith('_HUMAN'):
                continue
            # Otherwise, look up the gene name in HGNC and match against the
            # agent text
            gene_name = uniprot_client.get_gene_name(uniprot_id)
            if gene_name is None:
                unmatched += 1
                continue
            # Case-insensitive exact match between agent text and gene name.
            if agent_text.upper() == gene_name.upper():
                matched += 1
                protein_map[agent_text] = {'TEXT': agent_text,
                                           'UP': uniprot_id}
            else:
                unmatched += 1
    logger.info('Exact matches for %d proteins' % matched)
    logger.info('No match (or no gene name) for %d proteins' % unmatched)
    return protein_map
[ "def", "protein_map_from_twg", "(", "twg", ")", ":", "protein_map", "=", "{", "}", "unmatched", "=", "0", "matched", "=", "0", "logger", ".", "info", "(", "'Building grounding map for human proteins'", ")", "for", "agent_text", ",", "grounding_list", ",", "_", ...
40.836364
21.072727
def delete(self, bridge):
    """
    Delete a bridge by name

    :param bridge: bridge name
    :return:
    """
    payload = {'name': bridge}
    # Validate the arguments before dispatching the command.
    self._bridge_chk.check(payload)
    return self._client.json('bridge.delete', payload)
[ "def", "delete", "(", "self", ",", "bridge", ")", ":", "args", "=", "{", "'name'", ":", "bridge", ",", "}", "self", ".", "_bridge_chk", ".", "check", "(", "args", ")", "return", "self", ".", "_client", ".", "json", "(", "'bridge.delete'", ",", "args"...
19.357143
18.642857
def _link_field_to_dict(field): """ Utility for ripping apart github's Link header field. It's kind of ugly. """ if not field: return dict() return dict([ ( part.split('; ')[1][5:-1], part.split('; ')[0][1:-1], ) for part in field.split(', ') ])
[ "def", "_link_field_to_dict", "(", "field", ")", ":", "if", "not", "field", ":", "return", "dict", "(", ")", "return", "dict", "(", "[", "(", "part", ".", "split", "(", "'; '", ")", "[", "1", "]", "[", "5", ":", "-", "1", "]", ",", "part", ".",...
25
15.714286
def t_escaped_FORM_FEED_CHAR(self, t): r'\x66' # 'f' t.lexer.pop_state() t.value = unichr(0x000c) return t
[ "def", "t_escaped_FORM_FEED_CHAR", "(", "self", ",", "t", ")", ":", "# 'f'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x000c", ")", "return", "t" ]
27
13
def _retrieve_metadata(stream, channel_name):
    """Retrieve basic metadata by reading the first file in the cache

    Parameters
    ----------
    stream: lal stream object
        Stream containing a channel we want to learn about
    channel_name: str
        The name of the channel we want to know the dtype and sample rate of

    Returns
    -------
    channel_type: lal type enum
        Enum value which indicates the dtype of the channel
    sample_rate: int
        The sample rate of the data within this channel
    """
    # Return value unused; presumably called for its side effect of
    # verifying the channel exists in the stream — TODO confirm.
    lalframe.FrStreamGetVectorLength(channel_name, stream)
    channel_type = lalframe.FrStreamGetTimeSeriesType(channel_name, stream)
    create_series_func = _fr_type_map[channel_type][2]
    get_series_metadata_func = _fr_type_map[channel_type][3]
    # Build a zero-length series of the right dtype purely as a metadata
    # container for the lookup below.
    series = create_series_func(channel_name, stream.epoch, 0, 0,
                                lal.ADCCountUnit, 0)
    get_series_metadata_func(series, stream)
    # Sample rate is the reciprocal of the sample spacing deltaT.
    return channel_type, int(1.0/series.deltaT)
[ "def", "_retrieve_metadata", "(", "stream", ",", "channel_name", ")", ":", "lalframe", ".", "FrStreamGetVectorLength", "(", "channel_name", ",", "stream", ")", "channel_type", "=", "lalframe", ".", "FrStreamGetTimeSeriesType", "(", "channel_name", ",", "stream", ")"...
42.2
19.4
def set_boot_order(self, position, device):
    """Puts the given device to the specified position in the boot order.

    To indicate that no device is associated with the given position,
    :py:attr:`DeviceType.null` should be used.

    @todo setHardDiskBootOrder(), setNetworkBootOrder()

    in position of type int
        Position in the boot order (@c 1 to the total number of
        devices the machine can boot from, as returned by
        :py:func:`ISystemProperties.max_boot_position` ).

    in device of type :class:`DeviceType`
        The type of the device used to boot at the given position.

    raises :class:`OleErrorInvalidarg`
        Boot @a position out of range.

    raises :class:`OleErrorNotimpl`
        Booting from USB @a device currently not supported.

    """
    # Validate argument types up front; the COM bridge would otherwise
    # fail with a less helpful error.
    if not isinstance(position, baseinteger):
        raise TypeError("position can only be an instance of type baseinteger")
    if not isinstance(device, DeviceType):
        raise TypeError("device can only be an instance of type DeviceType")
    self._call("setBootOrder",
               in_p=[position, device])
[ "def", "set_boot_order", "(", "self", ",", "position", ",", "device", ")", ":", "if", "not", "isinstance", "(", "position", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"position can only be an instance of type baseinteger\"", ")", "if", "not", "isin...
40.366667
19.366667
def create(cls, domain, login, options, alias):
    """Create a mailbox."""
    cls.echo('Creating your mailbox.')
    result = cls.call('domain.mailbox.create', domain, login, options)
    # Aliases, when requested, replace the creation result as return value.
    if alias:
        cls.echo('Creating aliases.')
        return cls.set_alias(domain, login, list(alias))
    return result
[ "def", "create", "(", "cls", ",", "domain", ",", "login", ",", "options", ",", "alias", ")", ":", "cls", ".", "echo", "(", "'Creating your mailbox.'", ")", "result", "=", "cls", ".", "call", "(", "'domain.mailbox.create'", ",", "domain", ",", "login", ",...
33.5
18.8
def exists(self, user, status=None, symmetrical=False):
    """
    Returns boolean whether or not a relationship exists between the
    given users.  An optional :class:`RelationshipStatus` instance can be
    specified.
    """
    # Forward direction: instance -> user.
    filters = {
        'to_users__from_user': self.instance,
        'to_users__to_user': user,
        'to_users__site__pk': settings.SITE_ID,
    }
    if status:
        filters['to_users__status'] = status

    # Symmetrical check additionally requires user -> instance.
    if symmetrical:
        filters.update(
            from_users__to_user=self.instance,
            from_users__from_user=user,
            from_users__site__pk=settings.SITE_ID,
        )
        if status:
            filters['from_users__status'] = status

    return User.objects.filter(**filters).exists()
[ "def", "exists", "(", "self", ",", "user", ",", "status", "=", "None", ",", "symmetrical", "=", "False", ")", ":", "query", "=", "dict", "(", "to_users__from_user", "=", "self", ".", "instance", ",", "to_users__to_user", "=", "user", ",", "to_users__site__...
31.96
19.4
def get_Generic_itemtype(sq, simplify=True):
    """Retrieves the item type from a PEP 484 generic or subclass of such.
    sq must be a typing.Tuple or (subclass of) typing.Iterable or typing.Container.
    Consequently this also works with typing.List, typing.Set and typing.Dict.
    Note that for typing.Dict and mapping types in general, the key type is
    regarded as item type. For typing.Tuple all contained types are returned
    as a typing.Union. If simplify == True some effort is taken to eliminate
    redundancies in such a union.
    """
    if is_Tuple(sq):
        if simplify:
            # Copy the tuple params so the in-place simplification below
            # does not mutate the original annotation.
            itm_tps = [x for x in get_Tuple_params(sq)]
            simplify_for_Union(itm_tps)
            return Union[tuple(itm_tps)]
        else:
            return Union[get_Tuple_params(sq)]
    else:
        # Prefer the Container parameter (covers mappings via their key
        # type); fall back to Iterable if Container yields nothing.
        try:
            res = _select_Generic_superclass_parameters(sq, typing.Container)
        except TypeError:
            res = None
        if res is None:
            try:
                res = _select_Generic_superclass_parameters(sq, typing.Iterable)
            except TypeError:
                pass
        if res is None:
            raise TypeError("Has no itemtype: "+type_str(sq))
        else:
            return res[0]
[ "def", "get_Generic_itemtype", "(", "sq", ",", "simplify", "=", "True", ")", ":", "if", "is_Tuple", "(", "sq", ")", ":", "if", "simplify", ":", "itm_tps", "=", "[", "x", "for", "x", "in", "get_Tuple_params", "(", "sq", ")", "]", "simplify_for_Union", "...
41.758621
21.793103
def cnst_A(self, X):
    r"""Compute :math:`A \mathbf{x}` component of ADMM problem constraint.
    """
    # Stack the outputs of the two constraint operators into one block vector.
    return self.block_cat(self.cnst_A0(X), self.cnst_A1(X))
[ "def", "cnst_A", "(", "self", ",", "X", ")", ":", "return", "self", ".", "block_cat", "(", "self", ".", "cnst_A0", "(", "X", ")", ",", "self", ".", "cnst_A1", "(", "X", ")", ")" ]
29.833333
17.333333
def issue(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None, ForceIssue=False, UserID=None):
    """Issue a tax invoice.

    args
        CorpNum : member company registration number
        MgtKeyType : management-key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        Memo : processing memo
        EmailSubject : subject of the issuance notification e-mail
        ForceIssue : whether to force-issue a delayed tax invoice
        UserID : Popbill member user id
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")

    req = {"forceIssue": ForceIssue}
    if Memo is not None and Memo != '':
        req["memo"] = Memo
    if EmailSubject is not None and EmailSubject != '':
        req["emailSubject"] = EmailSubject

    uri = '/Taxinvoice/' + MgtKeyType + "/" + MgtKey
    return self._httppost(uri, self._stringtify(req), CorpNum, UserID, "ISSUE")
[ "def", "issue", "(", "self", ",", "CorpNum", ",", "MgtKeyType", ",", "MgtKey", ",", "Memo", "=", "None", ",", "EmailSubject", "=", "None", ",", "ForceIssue", "=", "False", ",", "UserID", "=", "None", ")", ":", "if", "MgtKeyType", "not", "in", "self", ...
36.548387
18.903226
def send_response(self, msgid, error=None, result=None):
    """Encode a response message for *msgid* carrying *error* / *result*
    and hand it to the transport.
    """
    encoded = self._encoder.create_response(msgid, error, result)
    self._send_message(encoded)
[ "def", "send_response", "(", "self", ",", "msgid", ",", "error", "=", "None", ",", "result", "=", "None", ")", ":", "msg", "=", "self", ".", "_encoder", ".", "create_response", "(", "msgid", ",", "error", ",", "result", ")", "self", ".", "_send_message...
37.8
10
def simple_attention(memory, att_size, mask, keep_prob=1.0, scope="simple_attention"):
    """Unconditioned attention: a weighted sum of the ``memory`` elements.

    A two-layer projection scores every memory slot, the scores are masked
    and softmax-normalised, and ``memory`` is reduced along its time axis
    with the resulting weights.
    """
    with tf.variable_scope(scope):
        batch, mem_len, mem_dim = tf.unstack(tf.shape(memory))
        # One dropout mask shared across the whole time dimension.
        dropped = tf.nn.dropout(memory, keep_prob=keep_prob,
                                noise_shape=[batch, 1, mem_dim])
        hidden = tf.layers.dense(dropped, att_size, activation=tf.nn.tanh)
        scores = tf.layers.dense(hidden, 1, use_bias=False)
        scores = softmax_mask(tf.squeeze(scores, [2]), mask)
        weights = tf.expand_dims(tf.nn.softmax(scores), axis=2)
        return tf.reduce_sum(weights * memory, axis=1)
[ "def", "simple_attention", "(", "memory", ",", "att_size", ",", "mask", ",", "keep_prob", "=", "1.0", ",", "scope", "=", "\"simple_attention\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ")", ":", "BS", ",", "ML", ",", "MH", "=", "tf...
51.692308
24.153846
def handle_pubrel(self):
    """Process an incoming PUBREL packet.

    Reads the message id from the current inbound packet, queues an
    ``EventPubrel`` for it and reports ``NC.ERR_SUCCESS``; a failed read
    short-circuits with the reader's error code instead.
    """
    self.logger.info("PUBREL received")
    status, message_id = self.in_packet.read_uint16()
    if status == NC.ERR_SUCCESS:
        self.push_event(event.EventPubrel(message_id))
        return NC.ERR_SUCCESS
    return status
[ "def", "handle_pubrel", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"PUBREL received\"", ")", "ret", ",", "mid", "=", "self", ".", "in_packet", ".", "read_uint16", "(", ")", "if", "ret", "!=", "NC", ".", "ERR_SUCCESS", ":", "retur...
23.538462
18.307692
def unacknowledge_problem_if_not_sticky(self):
    """
    Drop the current acknowledgement unless it is sticky
    :return: None
    """
    ack = getattr(self, 'acknowledgement', None)
    if ack is not None and not ack.sticky:
        self.unacknowledge_problem()
[ "def", "unacknowledge_problem_if_not_sticky", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'acknowledgement'", ")", "and", "self", ".", "acknowledgement", "is", "not", "None", ":", "if", "not", "self", ".", "acknowledgement", ".", "sticky", ":", ...
34.555556
14.111111
def register_rate_producer(self, rate_name: str, source: Callable[..., pd.DataFrame]=None) -> Pipeline:
    """Marks a ``Callable`` as the producer of a named rate.

    Convenience wrapper around ``register_value_producer`` that ensures
    rate data is appropriately scaled to the size of the simulation time
    step.  It is equivalent to
    ``register_value_producer(value_name, source,
    preferred_combiner=replace_combiner,
    preferred_post_processor=rescale_post_processor)``

    Parameters
    ----------
    rate_name :
        The name of the new dynamic rate pipeline.
    source :
        A callable source for the dynamic rate pipeline.

    Returns
    -------
    Callable
        A callable reference to the named dynamic rate pipeline.
    """
    manager = self._value_manager
    return manager.register_rate_producer(rate_name, source)
[ "def", "register_rate_producer", "(", "self", ",", "rate_name", ":", "str", ",", "source", ":", "Callable", "[", "...", ",", "pd", ".", "DataFrame", "]", "=", "None", ")", "->", "Pipeline", ":", "return", "self", ".", "_value_manager", ".", "register_rate_...
42.428571
29.619048