Dataset columns: code (string, 75–104k chars), docstring (string, 1–46.9k chars), text (string, 164–112k chars).
def collection_callback(result=None):
    """
    :type result: opendnp3.CommandPointResult
    """
    print("Header: {0} | Index: {1} | State: {2} | Status: {3}".format(
        result.headerIndex,
        result.index,
        opendnp3.CommandPointStateToString(result.state),
        opendnp3.CommandStatusToString(result.status)
    ))
def find_label(self, label: Label):
    """
    Helper function that iterates over the program and looks for a
    JumpTarget that has a Label matching the input label.

    :param label: Label object to search for in program
    :return: Program index where ``label`` is found
    """
    for index, action in enumerate(self.program):
        if isinstance(action, JumpTarget):
            if label == action.label:
                return index
    raise RuntimeError("Improper program - Jump Target not found in the "
                       "input program!")
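A minimal usage sketch; the Label, JumpTarget, and enclosing program classes below are hypothetical stand-ins, since the real types are not shown in this entry:

# Hypothetical stand-ins for the types referenced by find_label above.
class Label:
    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        return isinstance(other, Label) and self.name == other.name

class JumpTarget:
    def __init__(self, label):
        self.label = label

class Program:
    find_label = find_label  # attach the function above as a method

    def __init__(self, actions):
        self.program = actions

prog = Program(["noop", JumpTarget(Label("loop")), "noop"])
print(prog.find_label(Label("loop")))  # -> 1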
def wallet_contains(self, wallet, account):
    """
    Check whether **wallet** contains **account**

    :param wallet: Wallet to check contains **account**
    :type wallet: str

    :param account: Account to check exists in **wallet**
    :type account: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_contains(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000"
    ... )
    True
    """
    wallet = self._process_value(wallet, 'wallet')
    account = self._process_value(account, 'account')
    payload = {"wallet": wallet, "account": account}
    resp = self.call('wallet_contains', payload)
    return resp['exists'] == '1'
def to_api_repr(self):
    """API repr (JSON format) for entry."""
    info = super(TextEntry, self).to_api_repr()
    info["textPayload"] = self.payload
    return info
def _check_psutil(self, instance):
    """
    Gather metrics about connection states and interface counters
    using psutil facilities.
    """
    custom_tags = instance.get('tags', [])
    if self._collect_cx_state:
        self._cx_state_psutil(tags=custom_tags)
    self._cx_counters_psutil(tags=custom_tags)
def locate_files(root_dir):
    """Find all python files in the given directory and all subfolders."""
    all_files = []
    root_dir = os.path.abspath(root_dir)
    for dir_name, subdirs, files in os.walk(root_dir):
        for f in files:
            if f.endswith(".py"):
                all_files.append(os.path.join(dir_name, f))
    return all_files
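For example, printing every Python file under the current directory (locate_files needs the os module, whose import is not shown in this entry):

import os  # required by locate_files above

for path in locate_files("."):
    print(path)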
def htmlprint(*values, plain=None, **options):
    """Convert HTML to VTML and then print it.

    Follows same semantics as vtmlprint.
    """
    print(*[htmlrender(x, plain=plain) for x in values], **options)
def delete(self, obj, force=False):
    """Deletes all of the fields at the specified locations.

    args:

        ``obj=``\ *OBJECT*
            the object to remove the fields from

        ``force=``\ *BOOL*
            if True, missing attributes do not raise errors.  Otherwise,
            the first failure raises an exception without making any
            changes to ``obj``.
    """
    # TODO: this could be a whole lot more efficient!
    if not force:
        for fs in self:
            try:
                fs.get(obj)
            except FieldSelectorException:
                raise

    for fs in self:
        try:
            fs.delete(obj)
        except FieldSelectorException:
            pass
def compute_discounts(self, precision=None):
    '''
    Returns the total amount of discounts for this line with a
    specific number of decimals.

    @param precision:int number of decimal places
    @return: Decimal
    '''
    gross = self.compute_gross(precision)
    return min(gross, sum([d.compute(gross, precision)
                           for d in self.__discounts]))
def generate_records(self, infile):
    """ Process a file of rest and yield dictionaries """
    state = 0
    record = {}
    for item in self.generate_lines(infile):
        line = item['line']
        heading = item['heading']

        # any Markdown heading is just a caption, no image
        if heading:
            record['heading'] = True
            record['caption'] = line[1:].strip()
            state = 'caption'
            continue

        if not line[0].isspace():
            # at a potential image
            if state == 'caption':
                yield record
                record = {}
                state = 0

        if state == 'caption':
            record['caption'] += '\n' + line[:-1]
            continue

        fields = line.split(',')

        # nothing there, carry on
        if not fields:
            continue

        image = fields[0].strip()
        if not image:
            continue

        record['image'] = image
        try:
            time = float(fields[1])
        except (IndexError, ValueError):
            time = 0
        record['time'] = time

        try:
            caption = fields[2].strip()
        except IndexError:
            caption = None

        if caption:
            record['caption'] = caption

        # yield it if we have anything
        if record:
            yield record
            record = {}
def ls(obj=None, case_sensitive=False, verbose=False):
    """List available layers, or infos on a given layer class or name.
    params:
     - obj: Packet / packet name to use
     - case_sensitive: if obj is a string, is it case sensitive?
     - verbose
    """
    is_string = isinstance(obj, six.string_types)

    if obj is None or is_string:
        tip = False
        if obj is None:
            tip = True
            all_layers = sorted(conf.layers, key=lambda x: x.__name__)
        else:
            pattern = re.compile(obj, 0 if case_sensitive else re.I)
            # We first order by accuracy, then length
            if case_sensitive:
                sorter = lambda x: (x.__name__.index(obj), len(x.__name__))
            else:
                obj = obj.lower()
                sorter = lambda x: (x.__name__.lower().index(obj),
                                    len(x.__name__))
            all_layers = sorted((layer for layer in conf.layers
                                 if (isinstance(layer.__name__, str) and
                                     pattern.search(layer.__name__)) or
                                 (isinstance(layer.name, str) and
                                  pattern.search(layer.name))),
                                key=sorter)
        for layer in all_layers:
            print("%-10s : %s" % (layer.__name__, layer._name))
        if tip and conf.interactive:
            print("\nTIP: You may use explore() to navigate through all "
                  "layers using a clear GUI")
    else:
        is_pkt = isinstance(obj, Packet)
        if issubtype(obj, Packet) or is_pkt:
            for f in obj.fields_desc:
                cur_fld = f
                attrs = []
                long_attrs = []
                while isinstance(cur_fld, (Emph, ConditionalField)):
                    if isinstance(cur_fld, ConditionalField):
                        attrs.append(cur_fld.__class__.__name__[:4])
                    cur_fld = cur_fld.fld
                if verbose and isinstance(cur_fld, EnumField) \
                   and hasattr(cur_fld, "i2s"):
                    if len(cur_fld.i2s) < 50:
                        long_attrs.extend(
                            "%s: %d" % (strval, numval)
                            for numval, strval in
                            sorted(six.iteritems(cur_fld.i2s))
                        )
                elif isinstance(cur_fld, MultiEnumField):
                    fld_depend = cur_fld.depends_on(obj.__class__
                                                    if is_pkt else obj)
                    attrs.append("Depends on %s" % fld_depend.name)
                    if verbose:
                        cur_i2s = cur_fld.i2s_multi.get(
                            cur_fld.depends_on(obj if is_pkt else obj()), {}
                        )
                        if len(cur_i2s) < 50:
                            long_attrs.extend(
                                "%s: %d" % (strval, numval)
                                for numval, strval in
                                sorted(six.iteritems(cur_i2s))
                            )
                elif verbose and isinstance(cur_fld, FlagsField):
                    names = cur_fld.names
                    long_attrs.append(", ".join(names))
                class_name = "%s (%s)" % (
                    cur_fld.__class__.__name__,
                    ", ".join(attrs)) if attrs else cur_fld.__class__.__name__
                if isinstance(cur_fld, BitField):
                    class_name += " (%d bit%s)" % (cur_fld.size,
                                                   "s" if cur_fld.size > 1
                                                   else "")
                print("%-10s : %-35s =" % (f.name, class_name), end=' ')
                if is_pkt:
                    print("%-15r" % (getattr(obj, f.name),), end=' ')
                print("(%r)" % (f.default,))
                for attr in long_attrs:
                    print("%-15s%s" % ("", attr))
            if is_pkt and not isinstance(obj.payload, NoPayload):
                print("--")
                ls(obj.payload)
        else:
            print("Not a packet class or name. Type 'ls()' to list packet classes.")
def ftp_connect(self, host, user='anonymous', password='anonymous@',
                port=21, timeout=30, connId='default'):
    """
    Constructs FTP object, opens a connection and login.
    Call this function before any other (otherwise raises exception).
    Returns server output.

    Parameters:
    - host - server host address
    - user(optional) - FTP user name. If not given, 'anonymous' is used.
    - password(optional) - FTP password. If not given, 'anonymous@' is used.
    - port(optional) - TCP port. By default 21.
    - timeout(optional) - timeout in seconds. By default 30.
    - connId(optional) - connection identifier. By default equals 'default'

    Examples:
    | ftp connect | 192.168.1.10 | mylogin | mypassword |  |  |
    | ftp connect | 192.168.1.10 |  |  |  |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 |  |
    | ftp connect | 192.168.1.10 | port=29 | timeout=20 |  |  |
    """
    if connId in self.ftpList:
        errMsg = ("Connection with ID %s already exists. "
                  "It should be deleted before this step." % connId)
        raise FtpLibraryError(errMsg)
    else:
        newFtp = None
        outputMsg = ""
        try:
            timeout = int(timeout)
            port = int(port)
            newFtp = ftplib.FTP()
            outputMsg += newFtp.connect(host, port, timeout)
            outputMsg += newFtp.login(user, password)
        except socket.error as se:
            raise FtpLibraryError('Socket error exception occurred: %s' % se)
        except ftplib.all_errors as e:
            raise FtpLibraryError(str(e))
        except Exception as e:
            raise FtpLibraryError(str(e))
        if self.printOutput:
            logger.info(outputMsg)
        self.__addNewConnection(newFtp, connId)
def DFS(G):
    """
    Algorithm for depth-first searching the vertices of a graph.
    """
    if not G.vertices:
        raise GraphInsertError("This graph has no vertices.")
    color = {}
    pred = {}
    reach = {}
    finish = {}

    def DFSvisit(G, current, time):
        color[current] = 'grey'
        time += 1
        reach[current] = time
        for vertex in G.vertices[current]:
            if color[vertex] == 'white':
                pred[vertex] = current
                time = DFSvisit(G, vertex, time)
        color[current] = 'black'
        time += 1
        finish[current] = time
        return time

    for vertex in G.vertices:
        color[vertex] = 'white'
        pred[vertex] = None
        reach[vertex] = 0
        finish[vertex] = 0
    time = 0
    for vertex in G.vertices:
        if color[vertex] == 'white':
            time = DFSvisit(G, vertex, time)
    # Dictionary for vertex data after DFS
    # -> vertex_data = {vertex: (predecessor, reach, finish), }
    vertex_data = {}
    for vertex in G.vertices:
        vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
    return vertex_data
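A minimal sketch of DFS in use, assuming a graph object whose `vertices` attribute maps each vertex to an iterable of its neighbours (the real Graph class and GraphInsertError are not shown in this entry):

class Graph:  # hypothetical stand-in for the graph type expected by DFS
    def __init__(self, vertices):
        self.vertices = vertices

g = Graph({'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []})
for vertex, (pred, reach, finish) in sorted(DFS(g).items()):
    print(vertex, pred, reach, finish)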
def bel_edges(
    self,
    nanopub: Mapping[str, Any],
    namespace_targets: Mapping[str, List[str]] = {},
    rules: List[str] = [],
    orthologize_target: str = None,
) -> List[Mapping[str, Any]]:
    """Create BEL Edges from BEL nanopub

    Args:
        nanopub (Mapping[str, Any]): bel nanopub
        namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize
        rules (List[str]): which computed edge rules to process, default is all,
            look at BEL Specification yaml file for computed edge signature keys,
            e.g. degradation, if any rule in list is 'skip', then skip computing
            edges just return primary_edge
        orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for
            mouse, default option does not orthologize

    Returns:
        List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
    """
    edges = bel.edge.edges.create_edges(
        nanopub,
        self.endpoint,
        namespace_targets=namespace_targets,
        rules=rules,
        orthologize_target=orthologize_target,
    )
    return edges
def chebyshev(h1, h2):  # 12 us @array, 36 us @list \w 100 bins
    r"""
    Chebyshev distance.

    Also Tchebychev distance, Maximum or :math:`L_{\infty}` metric; equal to
    Minkowski distance with :math:`p=+\infty`. For the case of
    :math:`p=-\infty`, use `chebyshev_neg`.

    The Chebyshev distance between two histograms :math:`H` and :math:`H'` of
    size :math:`m` is defined as:

    .. math::

        d_{\infty}(H, H') = \max_{m=1}^M|H_m-H'_m|

    *Attributes:*

    - semimetric (triangle inequality satisfied?)

    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, 1]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-equal histograms:*

    - not applicable

    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.

    Returns
    -------
    chebyshev : float
        Chebyshev distance.

    See also
    --------
    minowski, chebyshev_neg
    """
    h1, h2 = __prepare_histogram(h1, h2)
    return max(scipy.absolute(h1 - h2))
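A usage sketch with two normalized 4-bin histograms (relies on the scipy import and the module's __prepare_histogram helper used inside chebyshev above):

h1 = [0.0, 0.2, 0.3, 0.5]
h2 = [0.25, 0.25, 0.25, 0.25]
print(chebyshev(h1, h2))  # -> 0.25, the largest per-bin difference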
def _update_axes_color(self, color):
    """Internal helper to set the axes label color"""
    prop_x = self.axes_actor.GetXAxisCaptionActor2D().GetCaptionTextProperty()
    prop_y = self.axes_actor.GetYAxisCaptionActor2D().GetCaptionTextProperty()
    prop_z = self.axes_actor.GetZAxisCaptionActor2D().GetCaptionTextProperty()
    if color is None:
        color = rcParams['font']['color']
    color = parse_color(color)
    for prop in [prop_x, prop_y, prop_z]:
        prop.SetColor(color[0], color[1], color[2])
        prop.SetShadow(False)
    return
def make_population(population_size, solution_generator, *args, **kwargs):
    """Make a population with the supplied generator."""
    return [
        solution_generator(*args, **kwargs) for _ in range(population_size)
    ]
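For example, seeding a population of five random 8-bit solutions (random_bitstring is a hypothetical generator; any callable returning one solution works):

import random

def random_bitstring(length):
    # hypothetical generator for demonstration
    return [random.randint(0, 1) for _ in range(length)]

population = make_population(5, random_bitstring, 8)
print(population)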
def depth_first_iter(self, self_first=True):
    """
    Iterate over nodes below this node, optionally yielding children
    before self.
    """
    if self_first:
        yield self
    for child in list(self.children):
        for i in child.depth_first_iter(self_first):
            yield i
    if not self_first:
        yield self
def get_site_type_dummy_variables(self, sites):
    """
    Binary rock/soil classification dummy variable based on sites.vs30.
    "``S`` is 1 for a rock site and 0 otherwise" (p. 1201).
    """
    is_rock = np.array(sites.vs30 > self.NEHRP_BC_BOUNDARY)
    return is_rock
def remove_service(self, service):
    """Removes the service passed in from the services offered by the
    current Profile. If the Analysis Service passed in is not assigned to
    this Analysis Profile, returns False.

    :param service: the service to be removed from this Analysis Profile
    :type service: AnalysisService
    :return: True if the AnalysisService has been removed successfully
    """
    obj = api.get_object(service)
    uid = api.get_uid(obj)

    # Remove the service from the referenced services
    services = self.getService()
    num_services = len(services)
    services.remove(obj)
    self.setService(services)
    removed = len(services) < num_services

    # Remove the service from the settings map
    settings = self.getAnalysisServicesSettings()
    settings = [item for item in settings if item.get('uid', '') != uid]
    self.setAnalysisServicesSettings(settings)

    return removed
def get(cls, name, raise_exc=True):
    """
    Get the element by name. Does an exact match by element type.

    :param str name: name of element
    :param bool raise_exc: optionally disable exception.
    :raises ElementNotFound: if element does not exist
    :rtype: Element
    """
    element = cls.objects.filter(name, exact_match=True).first() if \
        name is not None else None
    if not element and raise_exc:
        raise ElementNotFound('Cannot find specified element: %s, type: '
                              '%s' % (name, cls.__name__))
    return element
def clear(self, flush=True):
    """
    Args:
        flush(bool): Flush stream after clearing progress bar (Default:True)

    Clear progress bar
    """
    if self.enabled:
        self.manager.write(flush=flush, position=self.position)
def build_ctx(pythonpath=None):
    """
    Decorator that makes decorated function use BuildContext instead of \
    Context instance. BuildContext instance has more methods.

    :param pythonpath: Path or list of paths to add to environment variable
        PYTHONPATH. Each path can be absolute path, or relative path relative
        to top directory.

        Notice if this decorator is used without arguments, argument
        `pythonpath` is the decorated function.

    :return: Two situations:

        - If decorator arguments are given, return no-argument decorator.

        - If decorator arguments are not given, return wrapper function.
    """
    # If argument `pythonpath` is string
    if isinstance(pythonpath, str):
        # Create paths list containing the string
        path_s = [pythonpath]

    # If argument `pythonpath` is list
    elif isinstance(pythonpath, list):
        # Use the list as paths list
        path_s = pythonpath

    # If argument `pythonpath` is not string or list,
    # it means the decorator is used without arguments.
    else:
        # Set paths list be None
        path_s = None

    # Create no-argument decorator
    def _noarg_decorator(func):
        """
        No-argument decorator.

        :param func: Decorated function.

        :return: Wrapper function.
        """
        # Create BuildContext subclass
        class _BuildContext(BuildContext):
            # Set command name for the context class
            cmd = func.__name__

            # Set function name for the context class
            fun = func.__name__

        # Create wrapper function
        @wraps(func)
        def _new_func(ctx, *args, **kwargs):
            """
            Wrapper function.

            :param ctx: BuildContext object.

            :param \\*args: Other arguments passed to decorated function.

            :param \\*\\*kwargs: Other keyword arguments passed to decorated
                function.

            :return: Decorated function's call result.
            """
            # If paths list is not empty
            if path_s:
                # For each path
                for path in path_s:
                    # If the path is absolute path
                    if os.path.isabs(path):
                        # Use the path as absolute path
                        abs_path = path

                    # If the path is not absolute path,
                    # it means relative path relative to top directory.
                    else:
                        # Create path node
                        path_node = create_node(ctx, path)

                        # Get absolute path
                        abs_path = path_node.abspath()

                    # Add the absolute path to environment variable PYTHONPATH
                    add_pythonpath(abs_path)

            # Call the decorated function
            result = func(ctx, *args, **kwargs)

            # Return the call result
            return result

        # Store the created context class with the wrapper function
        _new_func._context_class = _BuildContext  # pylint: disable=W0212

        # Return the wrapper function
        return _new_func

    # If decorator arguments are given
    if path_s is not None:
        # Return no-argument decorator
        return _noarg_decorator

    # If decorator arguments are not given
    else:
        # Argument `pythonpath` is the decorated function
        _func = pythonpath

        # Call the no-argument decorator to create wrapper function
        wrapper_func = _noarg_decorator(_func)

        # Return the wrapper function
        return wrapper_func
def get_context(template, line, num_lines=5, marker=None):
    '''
    Returns debugging context around a line in a given string

    Returns:: string
    '''
    template_lines = template.splitlines()
    num_template_lines = len(template_lines)

    # In test mode, a single line template would return a crazy line number
    # like, 357. Do this sanity check and if the given line is obviously
    # wrong, just return the entire template
    if line > num_template_lines:
        return template

    context_start = max(0, line - num_lines - 1)  # subtract 1 for 0-based indexing
    context_end = min(num_template_lines, line + num_lines)
    error_line_in_context = line - context_start - 1  # subtract 1 for 0-based indexing

    buf = []
    if context_start > 0:
        buf.append('[...]')
        error_line_in_context += 1

    buf.extend(template_lines[context_start:context_end])

    if context_end < num_template_lines:
        buf.append('[...]')

    if marker:
        buf[error_line_in_context] += marker

    return '---\n{0}\n---'.format('\n'.join(buf))
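For example, marking line 3 of a nine-line template with one line of context on each side:

template = "\n".join("line %d" % i for i in range(1, 10))
print(get_context(template, 3, num_lines=1, marker="    <======"))
# ---
# [...]
# line 2
# line 3    <======
# line 4
# [...]
# ---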
def _prep_non_framed(self):
    """Prepare the opening data for a non-framed message."""
    try:
        plaintext_length = self.stream_length
        self.__unframed_plaintext_cache = self.source_stream
    except NotSupportedError:
        # We need to know the plaintext length before we can start processing the data.
        # If we cannot seek on the source then we need to read the entire source into memory.
        self.__unframed_plaintext_cache = io.BytesIO()
        self.__unframed_plaintext_cache.write(self.source_stream.read())
        plaintext_length = self.__unframed_plaintext_cache.tell()
        self.__unframed_plaintext_cache.seek(0)

    aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
        content_type=self.content_type, is_final_frame=True
    )
    associated_data = assemble_content_aad(
        message_id=self._header.message_id,
        aad_content_string=aad_content_string,
        seq_num=1,
        length=plaintext_length,
    )
    self.encryptor = Encryptor(
        algorithm=self._encryption_materials.algorithm,
        key=self._derived_data_key,
        associated_data=associated_data,
        iv=non_framed_body_iv(self._encryption_materials.algorithm),
    )
    self.output_buffer += serialize_non_framed_open(
        algorithm=self._encryption_materials.algorithm,
        iv=self.encryptor.iv,
        plaintext_length=plaintext_length,
        signer=self.signer,
    )
def list_devices():
    """
    List devices via HTTP GET.
    """
    output = {}
    for device_id, device in devices.items():
        output[device_id] = {
            'host': device.host,
            'state': device.state
        }
    return jsonify(devices=output)
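A sketch of wiring this handler into a Flask app; the devices registry and the URL below are assumptions, since neither is shown in this entry:

from flask import Flask, jsonify

app = Flask(__name__)
devices = {}  # hypothetical registry: device_id -> object with .host/.state

app.add_url_rule('/devices', 'list_devices', list_devices)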
def _extract_links_from_asset_tags_in_text(self, text):
    """
    Scan the text and extract asset tags and links to corresponding
    files.

    @param text: Page text.
    @type text: str

    @return: @see CourseraOnDemand._extract_links_from_text
    """
    # Extract asset tags from instructions text
    asset_tags_map = self._extract_asset_tags(text)
    ids = list(iterkeys(asset_tags_map))
    if not ids:
        return {}

    # asset tags contain asset names and ids. We need to make another
    # HTTP request to get asset URL.
    asset_urls = self._extract_asset_urls(ids)

    supplement_links = {}

    # Build supplement links, providing nice titles along the way
    for asset in asset_urls:
        title = clean_filename(
            asset_tags_map[asset['id']]['name'],
            self._unrestricted_filenames)
        extension = clean_filename(
            asset_tags_map[asset['id']]['extension'].strip(),
            self._unrestricted_filenames)
        url = asset['url'].strip()
        if extension not in supplement_links:
            supplement_links[extension] = []
        supplement_links[extension].append((url, title))

    return supplement_links
def lms():
    '''Install and start a Logitech Media Server (lms).

    More info:
     * http://wiki.slimdevices.com/index.php/Logitech_Media_Server
     * http://wiki.slimdevices.com/index.php/DebianPackage
     * http://www.mysqueezebox.com/download
     * XSqueeze on Kodi:
       * http://kodi.wiki/view/Add-on:XSqueeze
       * http://forum.kodi.tv/showthread.php?tid=122199
    '''
    # cf. http://wiki.slimdevices.com/index.php/DebianPackage#installing_7.9.0
    cmds = '''\
url="http://www.mysqueezebox.com/update/?version=7.9.0&revision=1&geturl=1&os=deb"
latest_lms=$(wget -q -O - "$url")
mkdir -p ~/.logitech_media_server_sources
cd ~/.logitech_media_server_sources
wget $latest_lms
lms_deb=${latest_lms##*/}
sudo dpkg -i $lms_deb
'''
    run(cmds)
    run('sudo usermod -aG audio squeezeboxserver')
    with warn_only():
        run('sudo addgroup lms')
        run('sudo usermod -aG lms squeezeboxserver')
    username = env.user
    run(flo('sudo usermod -aG audio {username}'))
    print('\n Set correct folder permissions manually, eg:')
    print(' > ' + cyan(flo('chown -R {username}.lms <path/to/your/media>')))
    hostname = env.host
    print(flo('\n lms frontend available at http://{hostname}:9000'))
def download_SRA(self, email, directory='series', filterby=None, nproc=1,
                 **kwargs):
    """Download SRA files for each GSM in series.

    .. warning::

        Do not use parallel option (nproc > 1) in the interactive shell.
        For more details see `this issue
        <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
        on SO.

    Args:
        email (:obj:`str`): E-mail that will be provided to the Entrez.
        directory (:obj:`str`, optional): Directory to save the data
            (defaults to the 'series' which saves the data to the directory
            with the name of the series + '_SRA' ending).
            Defaults to "series".
        filterby (:obj:`callable`, optional): Filter GSM objects, argument is
            a function that operates on GSM object and return bool
            eg. lambda x: "brain" not in x.name. Defaults to None.
        nproc (:obj:`int`, optional): Number of processes for SRA download
            (default is 1, no parallelization).
        **kwargs: Any arbitrary argument passed to GSM.download_SRA
            method. See the documentation for more details.

    Returns:
        :obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
            method where each GSM accession ID is the key for the
            output.

    """
    if directory == 'series':
        dirpath = os.path.abspath(self.get_accession() + "_SRA")
        utils.mkdir_p(dirpath)
    else:
        dirpath = os.path.abspath(directory)
        utils.mkdir_p(dirpath)

    if filterby is not None:
        gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)]
    else:
        gsms_to_use = self.gsms.values()

    if nproc == 1:
        # No need to parallelize, running ordinary download in loop
        downloaded_paths = dict()
        for gsm in gsms_to_use:
            logger.info(
                "Downloading SRA files for sample %s\n" % gsm.name)
            downloaded_paths[gsm.name] = gsm.download_SRA(
                email=email,
                directory=dirpath,
                **kwargs)
    elif nproc > 1:
        # Parallelization enabled
        downloaders = list()
        # Collecting params for Pool.map in a loop
        for gsm in gsms_to_use:
            downloaders.append([
                gsm,
                email,
                dirpath,
                kwargs])
        p = Pool(nproc)
        results = p.map(_sra_download_worker, downloaders)
        downloaded_paths = dict(results)
    else:
        raise ValueError("Nproc should be positive: %s" % str(nproc))

    return downloaded_paths
Download SRA files for each GSM in series.

.. warning::

    Do not use parallel option (nproc > 1) in the interactive shell.
    For more details see `this issue
    <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
    on SO.

Args:
    email (:obj:`str`): E-mail that will be provided to the Entrez.
    directory (:obj:`str`, optional): Directory to save the data
        (defaults to the 'series' which saves the data to the directory
        with the name of the series + '_SRA' ending).
        Defaults to "series".
    filterby (:obj:`callable`, optional): Filter GSM objects, argument is
        a function that operates on GSM object and return bool
        eg. lambda x: "brain" not in x.name. Defaults to None.
    nproc (:obj:`int`, optional): Number of processes for SRA download
        (default is 1, no parallelization).
    **kwargs: Any arbitrary argument passed to GSM.download_SRA
        method. See the documentation for more details.

Returns:
    :obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
        method where each GSM accession ID is the key for the
        output.
Below is the instruction that describes the task:
### Input:
Download SRA files for each GSM in series.

.. warning::

    Do not use parallel option (nproc > 1) in the interactive shell.
    For more details see `this issue
    <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
    on SO.

Args:
    email (:obj:`str`): E-mail that will be provided to the Entrez.
    directory (:obj:`str`, optional): Directory to save the data
        (defaults to the 'series' which saves the data to the directory
        with the name of the series + '_SRA' ending).
        Defaults to "series".
    filterby (:obj:`callable`, optional): Filter GSM objects, argument is
        a function that operates on GSM object and return bool
        eg. lambda x: "brain" not in x.name. Defaults to None.
    nproc (:obj:`int`, optional): Number of processes for SRA download
        (default is 1, no parallelization).
    **kwargs: Any arbitrary argument passed to GSM.download_SRA
        method. See the documentation for more details.

Returns:
    :obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
        method where each GSM accession ID is the key for the
        output.
### Response:
def download_SRA(self, email, directory='series', filterby=None, nproc=1,
                 **kwargs):
    """Download SRA files for each GSM in series.

    .. warning::

        Do not use parallel option (nproc > 1) in the interactive shell.
        For more details see `this issue
        <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
        on SO.

    Args:
        email (:obj:`str`): E-mail that will be provided to the Entrez.
        directory (:obj:`str`, optional): Directory to save the data
            (defaults to the 'series' which saves the data to the directory
            with the name of the series + '_SRA' ending).
            Defaults to "series".
        filterby (:obj:`callable`, optional): Filter GSM objects, argument is
            a function that operates on GSM object and return bool
            eg. lambda x: "brain" not in x.name. Defaults to None.
        nproc (:obj:`int`, optional): Number of processes for SRA download
            (default is 1, no parallelization).
        **kwargs: Any arbitrary argument passed to GSM.download_SRA
            method. See the documentation for more details.

    Returns:
        :obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
            method where each GSM accession ID is the key for the
            output.

    """
    if directory == 'series':
        dirpath = os.path.abspath(self.get_accession() + "_SRA")
        utils.mkdir_p(dirpath)
    else:
        dirpath = os.path.abspath(directory)
        utils.mkdir_p(dirpath)

    if filterby is not None:
        gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)]
    else:
        gsms_to_use = self.gsms.values()

    if nproc == 1:
        # No need to parallelize, running ordinary download in loop
        downloaded_paths = dict()
        for gsm in gsms_to_use:
            logger.info(
                "Downloading SRA files for sample %s\n" % gsm.name)
            downloaded_paths[gsm.name] = gsm.download_SRA(
                email=email,
                directory=dirpath,
                **kwargs)
    elif nproc > 1:
        # Parallelization enabled
        downloaders = list()
        # Collecting params for Pool.map in a loop
        for gsm in gsms_to_use:
            downloaders.append([
                gsm,
                email,
                dirpath,
                kwargs])
        p = Pool(nproc)
        results = p.map(_sra_download_worker, downloaders)
        downloaded_paths = dict(results)
    else:
        raise ValueError("Nproc should be positive: %s" % str(nproc))

    return downloaded_paths
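A hedged usage sketch for the method above (the accession, e-mail address and filter are illustrative assumptions, not taken from the source):

import GEOparse  # assumed host library of this GSE method

gse = GEOparse.get_GEO("GSE1563", destdir="./")  # hypothetical series accession
paths = gse.download_SRA(
    email="you@example.org",                   # forwarded to Entrez
    filterby=lambda gsm: "brain" in gsm.name,  # keep only matching samples
    nproc=1)                                   # keep 1 in interactive shells
# paths maps each GSM accession ID to the output of GSM.download_SRA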
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None):
    ''' Determines the number of events as a function of time. For this, the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    time_line_absolute: bool
        if true the analysis uses absolute time stamps
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    output_file: str
        if given, the event rate is additionally stored into this file as an 'Eventrate' table
    '''
    time_stamp = []
    rate = []

    start_time_set = False

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5', mode="r") as in_file_h5:
            meta_data_array = in_file_h5.root.meta_data[:]
            parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts])))

            if time_line_absolute:
                time_stamp.extend(parameter_ranges[:-1, 0])
            else:
                if not start_time_set:
                    start_time = parameter_ranges[0, 0]
                    start_time_set = True
                time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0)
            rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0]))  # d#Events / dt
    if time_line_absolute:
        plotting.plot_scatter_time(time_stamp, rate, title='Event rate [Hz]', marker_style='o', filename=output_pdf)
    else:
        plotting.plot_scatter(time_stamp, rate, title='Events per time', x_label='Progressed time [min.]', y_label='Events rate [Hz]', marker_style='o', filename=output_pdf)

    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            rec_array = np.array(list(zip(time_stamp, rate)), dtype=[('time_stamp', float), ('rate', float)]).view(np.recarray)
            try:
                rate_table = out_file_h5.create_table(out_file_h5.root, name='Eventrate', description=rec_array, title='Event rate', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                rate_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + ' has already an Eventrate node, will not overwrite existing.')
    return time_stamp, rate
Determines the number of events as a function of time. For this, the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file.

Parameters
----------
scan_base: list of str
    scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
combine_n_readouts: int
    the number of read outs to combine (e.g. 1000)
time_line_absolute: bool
    if true the analysis uses absolute time stamps
output_pdf: PdfPages
    PdfPages file object, if none the plot is printed to screen
output_file: str
    if given, the event rate is additionally stored into this file as an 'Eventrate' table
Below is the instruction that describes the task:
### Input:
Determines the number of events as a function of time. For this, the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file.

Parameters
----------
scan_base: list of str
    scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
combine_n_readouts: int
    the number of read outs to combine (e.g. 1000)
time_line_absolute: bool
    if true the analysis uses absolute time stamps
output_pdf: PdfPages
    PdfPages file object, if none the plot is printed to screen
output_file: str
    if given, the event rate is additionally stored into this file as an 'Eventrate' table
### Response:
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None):
    ''' Determines the number of events as a function of time. For this, the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.:  ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    time_line_absolute: bool
        if true the analysis uses absolute time stamps
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    output_file: str
        if given, the event rate is additionally stored into this file as an 'Eventrate' table
    '''
    time_stamp = []
    rate = []

    start_time_set = False

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5', mode="r") as in_file_h5:
            meta_data_array = in_file_h5.root.meta_data[:]
            parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts])))

            if time_line_absolute:
                time_stamp.extend(parameter_ranges[:-1, 0])
            else:
                if not start_time_set:
                    start_time = parameter_ranges[0, 0]
                    start_time_set = True
                time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0)
            rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0]))  # d#Events / dt
    if time_line_absolute:
        plotting.plot_scatter_time(time_stamp, rate, title='Event rate [Hz]', marker_style='o', filename=output_pdf)
    else:
        plotting.plot_scatter(time_stamp, rate, title='Events per time', x_label='Progressed time [min.]', y_label='Events rate [Hz]', marker_style='o', filename=output_pdf)

    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            rec_array = np.array(list(zip(time_stamp, rate)), dtype=[('time_stamp', float), ('rate', float)]).view(np.recarray)
            try:
                rate_table = out_file_h5.create_table(out_file_h5.root, name='Eventrate', description=rec_array, title='Event rate', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                rate_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + ' has already an Eventrate node, will not overwrite existing.')
    return time_stamp, rate
def classical_damage(
        fragility_functions, hazard_imls, hazard_poes,
        investigation_time, risk_investigation_time):
    """
    :param fragility_functions:
        a list of fragility functions for each damage state
    :param hazard_imls:
        Intensity Measure Levels
    :param hazard_poes:
        hazard curve
    :param investigation_time:
        hazard investigation time
    :param risk_investigation_time:
        risk investigation time
    :returns:
        an array of M probabilities of occurrence where M is the number
        of damage states
    """
    spi = fragility_functions.steps_per_interval
    if spi and spi > 1:  # interpolate
        imls = numpy.array(fragility_functions.interp_imls)
        min_val, max_val = hazard_imls[0], hazard_imls[-1]
        assert min_val > 0, hazard_imls  # sanity check
        numpy.putmask(imls, imls < min_val, min_val)
        numpy.putmask(imls, imls > max_val, max_val)
        poes = interpolate.interp1d(hazard_imls, hazard_poes)(imls)
    else:
        imls = (hazard_imls if fragility_functions.format == 'continuous'
                else fragility_functions.imls)
        poes = numpy.array(hazard_poes)
    afe = annual_frequency_of_exceedence(poes, investigation_time)
    annual_frequency_of_occurrence = pairwise_diff(
        pairwise_mean([afe[0]] + list(afe) + [afe[-1]]))
    poes_per_damage_state = []
    for ff in fragility_functions:
        frequency_of_exceedence_per_damage_state = numpy.dot(
            annual_frequency_of_occurrence, list(map(ff, imls)))
        poe_per_damage_state = 1. - numpy.exp(
            - frequency_of_exceedence_per_damage_state *
            risk_investigation_time)
        poes_per_damage_state.append(poe_per_damage_state)
    poos = pairwise_diff([1] + poes_per_damage_state + [0])
    return poos
:param fragility_functions:
    a list of fragility functions for each damage state
:param hazard_imls:
    Intensity Measure Levels
:param hazard_poes:
    hazard curve
:param investigation_time:
    hazard investigation time
:param risk_investigation_time:
    risk investigation time
:returns:
    an array of M probabilities of occurrence where M is the number
    of damage states
Below is the instruction that describes the task:
### Input:
:param fragility_functions:
    a list of fragility functions for each damage state
:param hazard_imls:
    Intensity Measure Levels
:param hazard_poes:
    hazard curve
:param investigation_time:
    hazard investigation time
:param risk_investigation_time:
    risk investigation time
:returns:
    an array of M probabilities of occurrence where M is the number
    of damage states
### Response:
def classical_damage(
        fragility_functions, hazard_imls, hazard_poes,
        investigation_time, risk_investigation_time):
    """
    :param fragility_functions:
        a list of fragility functions for each damage state
    :param hazard_imls:
        Intensity Measure Levels
    :param hazard_poes:
        hazard curve
    :param investigation_time:
        hazard investigation time
    :param risk_investigation_time:
        risk investigation time
    :returns:
        an array of M probabilities of occurrence where M is the number
        of damage states
    """
    spi = fragility_functions.steps_per_interval
    if spi and spi > 1:  # interpolate
        imls = numpy.array(fragility_functions.interp_imls)
        min_val, max_val = hazard_imls[0], hazard_imls[-1]
        assert min_val > 0, hazard_imls  # sanity check
        numpy.putmask(imls, imls < min_val, min_val)
        numpy.putmask(imls, imls > max_val, max_val)
        poes = interpolate.interp1d(hazard_imls, hazard_poes)(imls)
    else:
        imls = (hazard_imls if fragility_functions.format == 'continuous'
                else fragility_functions.imls)
        poes = numpy.array(hazard_poes)
    afe = annual_frequency_of_exceedence(poes, investigation_time)
    annual_frequency_of_occurrence = pairwise_diff(
        pairwise_mean([afe[0]] + list(afe) + [afe[-1]]))
    poes_per_damage_state = []
    for ff in fragility_functions:
        frequency_of_exceedence_per_damage_state = numpy.dot(
            annual_frequency_of_occurrence, list(map(ff, imls)))
        poe_per_damage_state = 1. - numpy.exp(
            - frequency_of_exceedence_per_damage_state *
            risk_investigation_time)
        poes_per_damage_state.append(poe_per_damage_state)
    poos = pairwise_diff([1] + poes_per_damage_state + [0])
    return poos
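For reference, a compact statement of the conversion the code above performs, assuming annual_frequency_of_exceedence implements the standard Poissonian relation:

\mathrm{afe}_i = -\frac{\ln(1 - \mathrm{poe}_i)}{t_{\mathrm{inv}}}, \qquad
P_{\ge ds} = 1 - \exp\Big(-\, t_{\mathrm{risk}} \sum_i \nu_i \, F_{ds}(\mathrm{iml}_i)\Big)

where the \nu_i are the annual frequencies of occurrence obtained from the pairwise mean/difference of the afe curve and F_{ds} is the fragility function for damage state ds; the final pairwise difference turns these exceedance probabilities into the returned probabilities of occurrence.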
def to_gufunc_string(self): """Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers. """ all_dims = self.all_core_dims dims_map = dict(zip(sorted(all_dims), range(len(all_dims)))) input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims] for core_dims in self.input_core_dims] output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims] for core_dims in self.output_core_dims] alt_signature = type(self)(input_core_dims, output_core_dims) return str(alt_signature)
Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers.
Below is the instruction that describes the task:
### Input:
Create an equivalent signature string for a NumPy gufunc.

Unlike __str__, handles dimensions that don't map to Python
identifiers.
### Response:
def to_gufunc_string(self):
    """Create an equivalent signature string for a NumPy gufunc.

    Unlike __str__, handles dimensions that don't map to Python
    identifiers.
    """
    all_dims = self.all_core_dims
    dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
    input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
                       for core_dims in self.input_core_dims]
    output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
                        for core_dims in self.output_core_dims]
    alt_signature = type(self)(input_core_dims, output_core_dims)
    return str(alt_signature)
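A hypothetical illustration of the renaming (the constructor call is assumed from the attributes the method uses):

sig = _UFuncSignature([('x', 'time')], [('time',)])
str(sig)                # e.g. "(x,time)->(time)"
sig.to_gufunc_string()  # "(dim1,dim0)->(dim0)": sorted dims become dim0, dim1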
def delete_queue(name, region, opts=None, user=None):
    '''
    Deletes a queue in the region.

    name
        Name of the SQS queue to delete

    region
        Name of the region to delete the queue from

    opts : None
        Any additional options to add to the command line

    user : None
        Run aws as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)

    log.debug('map %s', url_map)
    if name in url_map:
        delete = {'queue-url': url_map[name]}

        rtn = _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)

    else:
        out = ''
        err = "Delete failed"
        success = False

    ret = {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
    return ret
Deletes a queue in the region.

name
    Name of the SQS queue to delete

region
    Name of the region to delete the queue from

opts : None
    Any additional options to add to the command line

user : None
    Run aws as a user other than what the minion runs as

CLI Example:

    salt '*' aws_sqs.delete_queue <sqs queue> <region>
Below is the instruction that describes the task:
### Input:
Deletes a queue in the region.

name
    Name of the SQS queue to delete

region
    Name of the region to delete the queue from

opts : None
    Any additional options to add to the command line

user : None
    Run aws as a user other than what the minion runs as

CLI Example:

    salt '*' aws_sqs.delete_queue <sqs queue> <region>
### Response:
def delete_queue(name, region, opts=None, user=None):
    '''
    Deletes a queue in the region.

    name
        Name of the SQS queue to delete

    region
        Name of the region to delete the queue from

    opts : None
        Any additional options to add to the command line

    user : None
        Run aws as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)

    log.debug('map %s', url_map)
    if name in url_map:
        delete = {'queue-url': url_map[name]}

        rtn = _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)

    else:
        out = ''
        err = "Delete failed"
        success = False

    ret = {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
    return ret
def mgl_seq(x): """ Sequence whose sum is the Madhava-Gregory-Leibniz series. [x, -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...] Returns ------- An endless sequence that has the property ``atan(x) = sum(mgl_seq(x))``. Usually you would use the ``atan()`` function, not this one. """ odd_numbers = thub(count(start=1, step=2), 2) return Stream(1, -1) * x ** odd_numbers / odd_numbers
Sequence whose sum is the Madhava-Gregory-Leibniz series. [x, -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...] Returns ------- An endless sequence that has the property ``atan(x) = sum(mgl_seq(x))``. Usually you would use the ``atan()`` function, not this one.
Below is the instruction that describes the task:
### Input:
Sequence whose sum is the Madhava-Gregory-Leibniz series.

  [x,  -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...]

Returns
-------
  An endless sequence that has the property
  ``atan(x) = sum(mgl_seq(x))``.
  Usually you would use the ``atan()`` function, not this one.
### Response:
def mgl_seq(x):
    """
    Sequence whose sum is the Madhava-Gregory-Leibniz series.

      [x,  -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...]

    Returns
    -------
      An endless sequence that has the property
      ``atan(x) = sum(mgl_seq(x))``.
      Usually you would use the ``atan()`` function, not this one.

    """
    odd_numbers = thub(count(start=1, step=2), 2)
    return Stream(1, -1) * x ** odd_numbers / odd_numbers
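A quick numeric sanity check of the series above, in plain Python (independent of the Stream machinery):

terms = [(-1) ** k / (2 * k + 1) for k in range(100000)]
print(4 * sum(terms))  # ~3.14158..., slowly converging to pi = 4 * atan(1)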
def launch_request(self, uri_pattern, body, method, headers=None,
                   parameters=None, **kwargs):
    """ Launch HTTP request to the API with given arguments

    :param uri_pattern: string pattern of the full API url with keyword
        arguments (format string syntax)
    :param body: Raw Body content (string) (Plain/XML/JSON to be sent)
    :param method: HTTP verb to be used in the request
        [GET | POST | PUT | DELETE | UPDATE ]
    :param headers: HTTP header (dict)
    :param parameters: Query parameters for the URL.
        i.e. {'key1': 'value1', 'key2': 'value2'}
    :param **kwargs: URL parameters (without url_root) to fill the pattern
    :returns: REST API response ('Requests' response)
    """
    return self._call_api(uri_pattern, method, body, headers, parameters,
                          **kwargs)
Launch HTTP request to the API with given arguments

:param uri_pattern: string pattern of the full API url with keyword
    arguments (format string syntax)
:param body: Raw Body content (string) (Plain/XML/JSON to be sent)
:param method: HTTP verb to be used in the request
    [GET | POST | PUT | DELETE | UPDATE ]
:param headers: HTTP header (dict)
:param parameters: Query parameters for the URL.
    i.e. {'key1': 'value1', 'key2': 'value2'}
:param **kwargs: URL parameters (without url_root) to fill the pattern
:returns: REST API response ('Requests' response)
Below is the instruction that describes the task:
### Input:
Launch HTTP request to the API with given arguments

:param uri_pattern: string pattern of the full API url with keyword
    arguments (format string syntax)
:param body: Raw Body content (string) (Plain/XML/JSON to be sent)
:param method: HTTP verb to be used in the request
    [GET | POST | PUT | DELETE | UPDATE ]
:param headers: HTTP header (dict)
:param parameters: Query parameters for the URL.
    i.e. {'key1': 'value1', 'key2': 'value2'}
:param **kwargs: URL parameters (without url_root) to fill the pattern
:returns: REST API response ('Requests' response)
### Response:
def launch_request(self, uri_pattern, body, method, headers=None,
                   parameters=None, **kwargs):
    """ Launch HTTP request to the API with given arguments

    :param uri_pattern: string pattern of the full API url with keyword
        arguments (format string syntax)
    :param body: Raw Body content (string) (Plain/XML/JSON to be sent)
    :param method: HTTP verb to be used in the request
        [GET | POST | PUT | DELETE | UPDATE ]
    :param headers: HTTP header (dict)
    :param parameters: Query parameters for the URL.
        i.e. {'key1': 'value1', 'key2': 'value2'}
    :param **kwargs: URL parameters (without url_root) to fill the pattern
    :returns: REST API response ('Requests' response)
    """
    return self._call_api(uri_pattern, method, body, headers, parameters,
                          **kwargs)
def main(argv): """Main """ parameters = process_arguments(argv) file_list = sed_eval.io.load_file_pair_list(parameters['file_list']) path = os.path.dirname(parameters['file_list']) data = [] all_data = dcase_util.containers.MetaDataContainer() for file_pair in file_list: reference_event_list = sed_eval.io.load_event_list( os.path.abspath(os.path.join(path, file_pair['reference_file'])) ) estimated_event_list = sed_eval.io.load_event_list( os.path.abspath(os.path.join(path, file_pair['estimated_file'])) ) data.append({ 'reference_event_list': reference_event_list, 'estimated_event_list': estimated_event_list }) all_data += reference_event_list event_labels = all_data.unique_event_labels segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(event_labels) event_based_metrics = sed_eval.sound_event.EventBasedMetrics(event_labels) for file_pair in data: segment_based_metrics.evaluate( file_pair['reference_event_list'], file_pair['estimated_event_list'] ) event_based_metrics.evaluate( file_pair['reference_event_list'], file_pair['estimated_event_list'] ) if parameters['output_file']: results = dcase_util.containers.DictContainer({ 'segment_based_metrics': segment_based_metrics.results(), 'event_based_metrics': event_based_metrics.results() }).save(parameters['output_file']) else: print(segment_based_metrics) print(event_based_metrics)
Main
Below is the instruction that describes the task:
### Input:
Main
### Response:
def main(argv):
    """Main
    """
    parameters = process_arguments(argv)
    file_list = sed_eval.io.load_file_pair_list(parameters['file_list'])
    path = os.path.dirname(parameters['file_list'])

    data = []
    all_data = dcase_util.containers.MetaDataContainer()
    for file_pair in file_list:
        reference_event_list = sed_eval.io.load_event_list(
            os.path.abspath(os.path.join(path, file_pair['reference_file']))
        )
        estimated_event_list = sed_eval.io.load_event_list(
            os.path.abspath(os.path.join(path, file_pair['estimated_file']))
        )
        data.append({
            'reference_event_list': reference_event_list,
            'estimated_event_list': estimated_event_list
        })
        all_data += reference_event_list

    event_labels = all_data.unique_event_labels

    segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(event_labels)
    event_based_metrics = sed_eval.sound_event.EventBasedMetrics(event_labels)

    for file_pair in data:
        segment_based_metrics.evaluate(
            file_pair['reference_event_list'],
            file_pair['estimated_event_list']
        )
        event_based_metrics.evaluate(
            file_pair['reference_event_list'],
            file_pair['estimated_event_list']
        )

    if parameters['output_file']:
        results = dcase_util.containers.DictContainer({
            'segment_based_metrics': segment_based_metrics.results(),
            'event_based_metrics': event_based_metrics.results()
        }).save(parameters['output_file'])
    else:
        print(segment_based_metrics)
        print(event_based_metrics)
def access(self, app_subdomain):

    ''' a method to validate that the user can access the app '''

    title = '%s.access' % self.__class__.__name__

    # validate input
    input_fields = {
        'app_subdomain': app_subdomain
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # verbosity
    self.printer('Checking access to "%s" subdomain ... ' % app_subdomain, flush=True)

    # confirm existence of subdomain
    for app in self.apps:
        if app['name'] == app_subdomain:
            self.subdomain = app_subdomain
            break

    # refresh app list and search again
    if not self.subdomain:
        import json
        response = self._handle_command('heroku apps --json', handle_error=True)
        self.apps = json.loads(response)
        for app in self.apps:
            if app['name'] == app_subdomain:
                self.subdomain = app_subdomain
                break

    # check reason for failure
    if not self.subdomain:
        sys_command = 'heroku ps -a %s' % app_subdomain
        heroku_response = self._handle_command(sys_command, handle_error=True)
        if heroku_response.find('find that app') > -1:
            self.printer('ERROR')
            raise Exception('%s does not exist. Try: heroku create -a %s' % (app_subdomain, app_subdomain))
        elif heroku_response.find('have access to the app') > -1:
            self.printer('ERROR')
            raise Exception('%s belongs to another account.' % app_subdomain)
        else:
            self.printer('ERROR')
            raise Exception('Some unknown issue prevents you from accessing %s' % app_subdomain)

    self.printer('done.')

    return self
a method to validate that the user can access the app
Below is the instruction that describes the task:
### Input:
a method to validate that the user can access the app
### Response:
def access(self, app_subdomain):

    ''' a method to validate that the user can access the app '''

    title = '%s.access' % self.__class__.__name__

    # validate input
    input_fields = {
        'app_subdomain': app_subdomain
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # verbosity
    self.printer('Checking access to "%s" subdomain ... ' % app_subdomain, flush=True)

    # confirm existence of subdomain
    for app in self.apps:
        if app['name'] == app_subdomain:
            self.subdomain = app_subdomain
            break

    # refresh app list and search again
    if not self.subdomain:
        import json
        response = self._handle_command('heroku apps --json', handle_error=True)
        self.apps = json.loads(response)
        for app in self.apps:
            if app['name'] == app_subdomain:
                self.subdomain = app_subdomain
                break

    # check reason for failure
    if not self.subdomain:
        sys_command = 'heroku ps -a %s' % app_subdomain
        heroku_response = self._handle_command(sys_command, handle_error=True)
        if heroku_response.find('find that app') > -1:
            self.printer('ERROR')
            raise Exception('%s does not exist. Try: heroku create -a %s' % (app_subdomain, app_subdomain))
        elif heroku_response.find('have access to the app') > -1:
            self.printer('ERROR')
            raise Exception('%s belongs to another account.' % app_subdomain)
        else:
            self.printer('ERROR')
            raise Exception('Some unknown issue prevents you from accessing %s' % app_subdomain)

    self.printer('done.')

    return self
def interruptWrite(self, endpoint, buffer, timeout = 100):
    r"""Perform an interrupt write request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        buffer: sequence data buffer to write.
                This parameter can be any sequence type.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns the number of bytes written.
    """
    return self.dev.write(endpoint, buffer, timeout)
r"""Perform a interrupt write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written.
Below is the instruction that describes the task:
### Input:
r"""Perform an interrupt write request to the endpoint specified.

Arguments:
    endpoint: endpoint number.
    buffer: sequence data buffer to write.
            This parameter can be any sequence type.
    timeout: operation timeout in milliseconds. (default: 100)

Returns the number of bytes written.
### Response:
def interruptWrite(self, endpoint, buffer, timeout = 100):
    r"""Perform an interrupt write request to the endpoint specified.

    Arguments:
        endpoint: endpoint number.
        buffer: sequence data buffer to write.
                This parameter can be any sequence type.
        timeout: operation timeout in milliseconds. (default: 100)

    Returns the number of bytes written.
    """
    return self.dev.write(endpoint, buffer, timeout)
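A hypothetical call sequence with PyUSB (the vendor/product IDs, endpoint number and wrapper class name are placeholders):

import usb.core

raw = usb.core.find(idVendor=0x1234, idProduct=0x5678)
dev = LegacyDeviceWrapper(raw)  # assumed class exposing interruptWrite above
written = dev.interruptWrite(0x01, b'\x00\x01\x02')  # default timeout: 100 ms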
def flattened(self): """return flattened data ``(x, f)`` such that for the sweep through coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])`` """ flatx = {} flatf = {} for i in self.res: if isinstance(i, int): flatx[i] = [] flatf[i] = [] for x in sorted(self.res[i]): for d in sorted(self.res[i][x]): flatx[i].append(x) flatf[i].append(d) return flatx, flatf
return flattened data ``(x, f)`` such that for the sweep through coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
Below is the instruction that describes the task:
### Input:
return flattened data ``(x, f)`` such that for the sweep through
coordinate ``i`` we have for data point ``j`` that
``f[i][j] == func(x[i][j])``
### Response:
def flattened(self):
    """return flattened data ``(x, f)`` such that for the sweep through
    coordinate ``i`` we have for data point ``j`` that
    ``f[i][j] == func(x[i][j])``

    """
    flatx = {}
    flatf = {}
    for i in self.res:
        if isinstance(i, int):
            flatx[i] = []
            flatf[i] = []
            for x in sorted(self.res[i]):
                for d in sorted(self.res[i][x]):
                    flatx[i].append(x)
                    flatf[i].append(d)
    return flatx, flatf
def cart2pol(x, y): """Cartesian to Polar coordinates conversion.""" theta = np.arctan2(y, x) rho = np.hypot(x, y) return theta, rho
Cartesian to Polar coordinates conversion.
Below is the instruction that describes the task:
### Input:
Cartesian to Polar coordinates conversion.
### Response:
def cart2pol(x, y):
    """Cartesian to Polar coordinates conversion."""
    theta = np.arctan2(y, x)
    rho = np.hypot(x, y)
    return theta, rho
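A companion sketch: the inverse conversion and a round-trip check (the function pol2cart is our addition, not from the source):

import numpy as np

def pol2cart(theta, rho):
    """Polar to Cartesian coordinates conversion (inverse of cart2pol)."""
    return rho * np.cos(theta), rho * np.sin(theta)

theta, rho = cart2pol(3.0, 4.0)  # theta ~= 0.9273 rad, rho == 5.0
x, y = pol2cart(theta, rho)      # back to (3.0, 4.0)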
def _show_message(self, text): """Show message on splash screen.""" self.splash.showMessage(text, Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute, QColor(Qt.white))
Show message on splash screen.
Below is the instruction that describes the task:
### Input:
Show message on splash screen.
### Response:
def _show_message(self, text):
    """Show message on splash screen."""
    self.splash.showMessage(text,
                            Qt.AlignBottom | Qt.AlignCenter |
                            Qt.AlignAbsolute,
                            QColor(Qt.white))
def update(self, truth, guess, features): """Update the feature weights.""" def upd_feat(c, f, w, v): param = (f, c) self._totals[param] += (self.i - self._tstamps[param]) * w self._tstamps[param] = self.i self.weights[f][c] = w + v self.i += 1 if truth == guess: return None for f in features: weights = self.weights.setdefault(f, {}) upd_feat(truth, f, weights.get(truth, 0.0), 1.0) upd_feat(guess, f, weights.get(guess, 0.0), -1.0) return None
Update the feature weights.
Below is the instruction that describes the task:
### Input:
Update the feature weights.
### Response:
def update(self, truth, guess, features):
    """Update the feature weights."""
    def upd_feat(c, f, w, v):
        param = (f, c)
        self._totals[param] += (self.i - self._tstamps[param]) * w
        self._tstamps[param] = self.i
        self.weights[f][c] = w + v

    self.i += 1
    if truth == guess:
        return None
    for f in features:
        weights = self.weights.setdefault(f, {})
        upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
        upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
    return None
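A hedged sketch of the loop that typically drives these averaged-perceptron updates (predict(), n_epochs and the data layout are assumptions, not from the source):

import random

for _ in range(n_epochs):
    random.shuffle(training_data)
    for features, truth in training_data:
        guess = model.predict(features)       # assumed arg-max over class scores
        model.update(truth, guess, features)  # no-op when the guess is correct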
def create(self, email, verify=None, components=None): """Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers """ data = ApiParams() data['email'] = email data['verify'] = verify data['components'] = components return self._post('subscribers', data=data)['data']
Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers
Below is the instruction that describes the task:
### Input:
Create a new subscriber

:param str email: Email address to subscribe
:param bool verify: Whether to send verification email
:param list components: Components ID list, defaults to all
:return: Created subscriber data (:class:`dict`)

.. seealso:: https://docs.cachethq.io/reference#subscribers
### Response:
def create(self, email, verify=None, components=None):
    """Create a new subscriber

    :param str email: Email address to subscribe
    :param bool verify: Whether to send verification email
    :param list components: Components ID list, defaults to all
    :return: Created subscriber data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#subscribers
    """
    data = ApiParams()
    data['email'] = email
    data['verify'] = verify
    data['components'] = components
    return self._post('subscribers', data=data)['data']
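A hypothetical call against a Cachet API client (the client construction, endpoint and token are assumptions):

client = cachetclient.Client(endpoint='https://status.example.org/api/v1',
                             api_token='secret-token')
sub = client.subscribers.create(email='user@example.org', verify=True)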
def guesstype(timestr): """Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed """ timestr_full = " {} ".format(timestr) if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1: return Chronyk(timestr) comps = ["second", "minute", "hour", "day", "week", "month", "year"] for comp in comps: if timestr_full.find(comp) != -1: return ChronykDelta(timestr) return Chronyk(timestr)
Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed
Below is the instruction that describes the task:
### Input:
Tries to guess whether a string represents a time or a time delta and
returns the appropriate object.

:param timestr (required)
    The string to be analyzed
### Response:
def guesstype(timestr):
    """Tries to guess whether a string represents a time or a time delta and
    returns the appropriate object.

    :param timestr (required)
        The string to be analyzed
    """
    timestr_full = " {} ".format(timestr)
    if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1:
        return Chronyk(timestr)

    comps = ["second", "minute", "hour", "day", "week", "month", "year"]
    for comp in comps:
        if timestr_full.find(comp) != -1:
            return ChronykDelta(timestr)

    return Chronyk(timestr)
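Examples of the dispatch rule, following the keyword checks in the code above:

guesstype("5 minutes")      # "minute" matches -> ChronykDelta (a duration)
guesstype("5 minutes ago")  # " ago " matches first -> Chronyk (a point in time)
guesstype("2014-01-01")     # no keyword matches -> falls through to Chronyk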
def _fw_delete(self, drvr_name, data): """Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache. """ fw_id = data.get('firewall_id') tenant_id = self.tenant_db.get_fw_tenant(fw_id) if tenant_id not in self.fwid_attr: LOG.error("Invalid tenant id for FW delete %s", tenant_id) return tenant_obj = self.fwid_attr[tenant_id] ret = self._check_delete_fw(tenant_id, drvr_name) if ret: tenant_obj.delete_fw(fw_id) self.tenant_db.del_fw_tenant(fw_id)
Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache.
Below is the instruction that describes the task:
### Input:
Firewall Delete routine.

This function calls routines to remove FW from fabric and device.
It also updates its local cache.
### Response:
def _fw_delete(self, drvr_name, data):
    """Firewall Delete routine.

    This function calls routines to remove FW from fabric and device.
    It also updates its local cache.
    """
    fw_id = data.get('firewall_id')
    tenant_id = self.tenant_db.get_fw_tenant(fw_id)
    if tenant_id not in self.fwid_attr:
        LOG.error("Invalid tenant id for FW delete %s", tenant_id)
        return
    tenant_obj = self.fwid_attr[tenant_id]
    ret = self._check_delete_fw(tenant_id, drvr_name)
    if ret:
        tenant_obj.delete_fw(fw_id)
        self.tenant_db.del_fw_tenant(fw_id)
def transform(self, X, y=None, scan_onsets=None):
    """ Use the model to estimate the time course of response to
    each condition (ts), and the time course unrelated to task
    (ts0) which is spread across the brain.
    This is equivalent to "decoding" the design matrix and nuisance
    regressors from a new dataset different from the training dataset
    on which fit() was applied. An AR(1) smooth prior is imposed on
    the decoded ts and ts0 with the AR(1) parameters learnt from the
    corresponding time courses in the training data.
    Notice: if you set the rank to be lower than the number of
    experimental conditions (number of columns in the design matrix),
    the recovered task-related activity will have collinearity
    (the recovered time courses of some conditions can be linearly
    explained by the recovered time courses of other conditions).

    Parameters
    ----------
    X : numpy arrays, shape=[time_points, voxels]
        fMRI data of a new dataset of the same subject. The voxels should
        match those used in the fit() function.
        If data are z-scored (recommended) when fitting the model,
        data should be z-scored as well when calling transform()
    y : not used (as it is unsupervised learning)
    scan_onsets : numpy array, shape=[number of runs].
        A list of indices corresponding to the onsets of
        scans in the data X.
        If not provided, data will be assumed to be acquired in a
        continuous scan.

    Returns
    -------
    ts : numpy arrays, shape = [time_points, condition]
        The estimated response to the task conditions
        which have the response amplitudes estimated during the fit
        step.
    ts0: numpy array, shape = [time_points, n_nureg]
        The estimated time course spread across the brain, with the
        loading weights estimated during the fit step.
    """
    assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
        'The shape of X is not consistent with the shape of data '\
        'used in the fitting step. They should have the same number '\
        'of voxels'
    assert scan_onsets is None or (scan_onsets.ndim == 1 and
                                   0 in scan_onsets), \
        'scan_onsets should either be None or an array of indices. '\
        'If it is given, it should include at least 0'

    if scan_onsets is None:
        scan_onsets = np.array([0], dtype=int)
    else:
        scan_onsets = np.int32(scan_onsets)
    ts, ts0, log_p = self._transform(
        Y=X, scan_onsets=scan_onsets, beta=self.beta_,
        beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_,
        rho_X=self._rho_design_, sigma2_X=self._sigma2_design_,
        rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
    return ts, ts0
Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and nuisance
regressors from a new dataset different from the training dataset
on which fit() was applied. An AR(1) smooth prior is imposed on
the decoded ts and ts0 with the AR(1) parameters learnt from the
corresponding time courses in the training data.
Notice: if you set the rank to be lower than the number of
experimental conditions (number of columns in the design matrix),
the recovered task-related activity will have collinearity
(the recovered time courses of some conditions can be linearly
explained by the recovered time courses of other conditions).

Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
    fMRI data of a new dataset of the same subject. The voxels should
    match those used in the fit() function.
    If data are z-scored (recommended) when fitting the model,
    data should be z-scored as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : numpy array, shape=[number of runs].
    A list of indices corresponding to the onsets of
    scans in the data X.
    If not provided, data will be assumed to be acquired in a
    continuous scan.

Returns
-------
ts : numpy arrays, shape = [time_points, condition]
    The estimated response to the task conditions
    which have the response amplitudes estimated during the fit
    step.
ts0: numpy array, shape = [time_points, n_nureg]
    The estimated time course spread across the brain, with the
    loading weights estimated during the fit step.
Below is the instruction that describes the task:
### Input:
Use the model to estimate the time course of response to
each condition (ts), and the time course unrelated to task
(ts0) which is spread across the brain.
This is equivalent to "decoding" the design matrix and nuisance
regressors from a new dataset different from the training dataset
on which fit() was applied. An AR(1) smooth prior is imposed on
the decoded ts and ts0 with the AR(1) parameters learnt from the
corresponding time courses in the training data.
Notice: if you set the rank to be lower than the number of
experimental conditions (number of columns in the design matrix),
the recovered task-related activity will have collinearity
(the recovered time courses of some conditions can be linearly
explained by the recovered time courses of other conditions).

Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
    fMRI data of a new dataset of the same subject. The voxels should
    match those used in the fit() function.
    If data are z-scored (recommended) when fitting the model,
    data should be z-scored as well when calling transform()
y : not used (as it is unsupervised learning)
scan_onsets : numpy array, shape=[number of runs].
    A list of indices corresponding to the onsets of
    scans in the data X.
    If not provided, data will be assumed to be acquired in a
    continuous scan.

Returns
-------
ts : numpy arrays, shape = [time_points, condition]
    The estimated response to the task conditions
    which have the response amplitudes estimated during the fit
    step.
ts0: numpy array, shape = [time_points, n_nureg]
    The estimated time course spread across the brain, with the
    loading weights estimated during the fit step.
### Response:
def transform(self, X, y=None, scan_onsets=None):
    """ Use the model to estimate the time course of response to
    each condition (ts), and the time course unrelated to task
    (ts0) which is spread across the brain.
    This is equivalent to "decoding" the design matrix and nuisance
    regressors from a new dataset different from the training dataset
    on which fit() was applied. An AR(1) smooth prior is imposed on
    the decoded ts and ts0 with the AR(1) parameters learnt from the
    corresponding time courses in the training data.
    Notice: if you set the rank to be lower than the number of
    experimental conditions (number of columns in the design matrix),
    the recovered task-related activity will have collinearity
    (the recovered time courses of some conditions can be linearly
    explained by the recovered time courses of other conditions).

    Parameters
    ----------
    X : numpy arrays, shape=[time_points, voxels]
        fMRI data of a new dataset of the same subject. The voxels should
        match those used in the fit() function.
        If data are z-scored (recommended) when fitting the model,
        data should be z-scored as well when calling transform()
    y : not used (as it is unsupervised learning)
    scan_onsets : numpy array, shape=[number of runs].
        A list of indices corresponding to the onsets of
        scans in the data X.
        If not provided, data will be assumed to be acquired in a
        continuous scan.

    Returns
    -------
    ts : numpy arrays, shape = [time_points, condition]
        The estimated response to the task conditions
        which have the response amplitudes estimated during the fit
        step.
    ts0: numpy array, shape = [time_points, n_nureg]
        The estimated time course spread across the brain, with the
        loading weights estimated during the fit step.
""" assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \ 'The shape of X is not consistent with the shape of data '\ 'used in the fitting step. They should have the same number '\ 'of voxels' assert scan_onsets is None or (scan_onsets.ndim == 1 and 0 in scan_onsets), \ 'scan_onsets should either be None or an array of indices '\ 'If it is given, it should include at least 0' if scan_onsets is None: scan_onsets = np.array([0], dtype=int) else: scan_onsets = np.int32(scan_onsets) ts, ts0, log_p = self._transform( Y=X, scan_onsets=scan_onsets, beta=self.beta_, beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_, rho_X=self._rho_design_, sigma2_X=self._sigma2_design_, rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_) return ts, ts0
def emit(self, record): """Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): raise except Exception: self.handleError(record)
Emit a record. Output the record to the file, catering for rollover as described in doRollover().
Below is the instruction that describes the task:
### Input:
Emit a record.

Output the record to the file, catering for rollover as described
in doRollover().
### Response:
def emit(self, record):
    """Emit a record.

    Output the record to the file, catering for rollover as described
    in doRollover().
    """
    try:
        if self.shouldRollover(record):
            self.doRollover()
        FileHandler.emit(self, record)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handleError(record)
def value_equality(cls: type = None,
                   *,
                   unhashable: bool = False,
                   distinct_child_types: bool = False,
                   manual_cls: bool = False,
                   approximate: bool = False
                   ) -> Union[Callable[[type], type], type]:
    """Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.

    _value_equality_values_ is a method that the decorated class must
    implement.

    _value_equality_approximate_values_ is a method that the decorated class
    might implement if special support for approximate equality is required.
    This is only used when approximate argument is set. When approximate
    argument is set and _value_equality_approximate_values_ is not defined,
    _value_equality_values_ values are used for approximate equality.
    For example, this can be used to compare periodic values like angles: the
    angle value can be wrapped with `PeriodicValue`. When returned as part of
    approximate values a special normalization will be done automatically to
    guarantee correctness.

    Note that the type of the decorated value is included as part of the value
    equality values. This is so that completely separate classes with
    identical equality values (e.g. a Point2D and a Vector2D) don't compare
    as equal. Further note that this means that child types of the decorated
    type will be considered equal to each other, though this behavior can be
    changed via the `distinct_child_types` argument. The type logic is
    implemented behind the scenes by a `_value_equality_values_cls_` method
    added to the class.

    Args:
        cls: The type to decorate. Automatically passed in by python when
            using the @cirq.value_equality decorator notation on a class.
        unhashable: When set, the __hash__ method will be set to None instead
            of to a hash of the equality class and equality values. Useful for
            mutable types such as dictionaries.
        distinct_child_types: When set, classes that inherit from the
            decorated class will not be considered equal to it. Also,
            different child classes will not be considered equal to each
            other. Useful for when the decorated class is an abstract class or
            trait that is helping to define equality for many conceptually
            distinct concrete classes.
        manual_cls: When set, the method '_value_equality_values_cls_' must be
            implemented. This allows a new class to compare as equal to
            another existing class that is also using value equality, by
            having the new class return the existing class' type.
            Incompatible with `distinct_child_types`.
        approximate: When set, the decorated class will be enhanced with
            `_approx_eq_` implementation and thus start to support the
            `SupportsApproximateEquality` protocol.
    """

    # If keyword arguments were specified, python invokes the decorator method
    # without a `cls` argument, then passes `cls` into the result.
    if cls is None:
        return lambda deferred_cls: value_equality(deferred_cls,
                                                   unhashable=unhashable,
                                                   manual_cls=manual_cls,
                                                   distinct_child_types=
                                                   distinct_child_types,
                                                   approximate=approximate)

    if distinct_child_types and manual_cls:
        raise ValueError("'distinct_child_types' is "
                         "incompatible with 'manual_cls'")

    values_getter = getattr(cls, '_value_equality_values_', None)
    if values_getter is None:
        raise TypeError('The @cirq.value_equality decorator requires a '
                        '_value_equality_values_ method to be defined.')

    if distinct_child_types:
        setattr(cls, '_value_equality_values_cls_', lambda self: type(self))
    elif manual_cls:
        cls_getter = getattr(cls, '_value_equality_values_cls_', None)
        if cls_getter is None:
            raise TypeError('The @cirq.value_equality decorator requires a '
                            '_value_equality_values_cls_ method to be defined '
                            'when "manual_cls" is set.')
    else:
        setattr(cls, '_value_equality_values_cls_', lambda self: cls)
    setattr(cls, '__hash__', None if unhashable else _value_equality_hash)
    setattr(cls, '__eq__', _value_equality_eq)
    setattr(cls, '__ne__', _value_equality_ne)

    if approximate:
        if not hasattr(cls, '_value_equality_approximate_values_'):
            setattr(cls, '_value_equality_approximate_values_', values_getter)
        setattr(cls, '_approx_eq_', _value_equality_approx_eq)

    return cls
Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.

_value_equality_values_ is a method that the decorated class must
implement.

_value_equality_approximate_values_ is a method that the decorated class
might implement if special support for approximate equality is required.
This is only used when approximate argument is set. When approximate
argument is set and _value_equality_approximate_values_ is not defined,
_value_equality_values_ values are used for approximate equality.
For example, this can be used to compare periodic values like angles: the
angle value can be wrapped with `PeriodicValue`. When returned as part of
approximate values a special normalization will be done automatically to
guarantee correctness.

Note that the type of the decorated value is included as part of the value
equality values. This is so that completely separate classes with
identical equality values (e.g. a Point2D and a Vector2D) don't compare
as equal. Further note that this means that child types of the decorated
type will be considered equal to each other, though this behavior can be
changed via the `distinct_child_types` argument. The type logic is
implemented behind the scenes by a `_value_equality_values_cls_` method
added to the class.

Args:
    cls: The type to decorate. Automatically passed in by python when
        using the @cirq.value_equality decorator notation on a class.
    unhashable: When set, the __hash__ method will be set to None instead
        of to a hash of the equality class and equality values. Useful for
        mutable types such as dictionaries.
    distinct_child_types: When set, classes that inherit from the
        decorated class will not be considered equal to it. Also,
        different child classes will not be considered equal to each
        other. Useful for when the decorated class is an abstract class or
        trait that is helping to define equality for many conceptually
        distinct concrete classes.
    manual_cls: When set, the method '_value_equality_values_cls_' must be
        implemented. This allows a new class to compare as equal to
        another existing class that is also using value equality, by
        having the new class return the existing class' type.
        Incompatible with `distinct_child_types`.
    approximate: When set, the decorated class will be enhanced with
        `_approx_eq_` implementation and thus start to support the
        `SupportsApproximateEquality` protocol.
Below is the instruction that describes the task:
### Input:
Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.

_value_equality_values_ is a method that the decorated class must
implement.

_value_equality_approximate_values_ is a method that the decorated class
might implement if special support for approximate equality is required.
This is only used when approximate argument is set. When approximate
argument is set and _value_equality_approximate_values_ is not defined,
_value_equality_values_ values are used for approximate equality.
For example, this can be used to compare periodic values like angles: the
angle value can be wrapped with `PeriodicValue`. When returned as part of
approximate values a special normalization will be done automatically to
guarantee correctness.

Note that the type of the decorated value is included as part of the value
equality values. This is so that completely separate classes with
identical equality values (e.g. a Point2D and a Vector2D) don't compare
as equal. Further note that this means that child types of the decorated
type will be considered equal to each other, though this behavior can be
changed via the `distinct_child_types` argument. The type logic is
implemented behind the scenes by a `_value_equality_values_cls_` method
added to the class.

Args:
    cls: The type to decorate. Automatically passed in by python when
        using the @cirq.value_equality decorator notation on a class.
    unhashable: When set, the __hash__ method will be set to None instead
        of to a hash of the equality class and equality values. Useful for
        mutable types such as dictionaries.
    distinct_child_types: When set, classes that inherit from the
        decorated class will not be considered equal to it. Also,
        different child classes will not be considered equal to each
        other. Useful for when the decorated class is an abstract class or
        trait that is helping to define equality for many conceptually
        distinct concrete classes.
    manual_cls: When set, the method '_value_equality_values_cls_' must be
        implemented. This allows a new class to compare as equal to
        another existing class that is also using value equality, by
        having the new class return the existing class' type.
        Incompatible with `distinct_child_types`.
    approximate: When set, the decorated class will be enhanced with
        `_approx_eq_` implementation and thus start to support the
        `SupportsApproximateEquality` protocol.
### Response:
def value_equality(cls: type = None,
                   *,
                   unhashable: bool = False,
                   distinct_child_types: bool = False,
                   manual_cls: bool = False,
                   approximate: bool = False
                   ) -> Union[Callable[[type], type], type]:
    """Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.

    _value_equality_values_ is a method that the decorated class must
    implement.

    _value_equality_approximate_values_ is a method that the decorated class
    might implement if special support for approximate equality is required.
    This is only used when approximate argument is set. When approximate
    argument is set and _value_equality_approximate_values_ is not defined,
    _value_equality_values_ values are used for approximate equality.
    For example, this can be used to compare periodic values like angles: the
    angle value can be wrapped with `PeriodicValue`. When returned as part of
    approximate values a special normalization will be done automatically to
    guarantee correctness.

    Note that the type of the decorated value is included as part of the value
    equality values.
This is so that completely separate classes with identical equality values (e.g. a Point2D and a Vector2D) don't compare as equal. Further note that this means that child types of the decorated type will be considered equal to each other, though this behavior can be changed via the `distinct_child_types` argument. The type logic is implemented behind the scenes by a `_value_equality_values_cls_` method added to the class. Args: cls: The type to decorate. Automatically passed in by python when using the @cirq.value_equality decorator notation on a class. unhashable: When set, the __hash__ method will be set to None instead of to a hash of the equality class and equality values. Useful for mutable types such as dictionaries. distinct_child_types: When set, classes that inherit from the decorated class will not be considered equal to it. Also, different child classes will not be considered equal to each other. Useful for when the decorated class is an abstract class or trait that is helping to define equality for many conceptually distinct concrete classes. manual_cls: When set, the method `_value_equality_values_cls_` must be implemented. This allows a new class to compare as equal to another existing class that is also using value equality, by having the new class return the existing class' type. Incompatible with `distinct_child_types`. approximate: When set, the decorated class will be enhanced with `_approx_eq_` implementation and thus start to support the `SupportsApproximateEquality` protocol. """ # If keyword arguments were specified, python invokes the decorator method # without a `cls` argument, then passes `cls` into the result. if cls is None: return lambda deferred_cls: value_equality(deferred_cls, unhashable=unhashable, manual_cls=manual_cls, distinct_child_types= distinct_child_types, approximate=approximate) if distinct_child_types and manual_cls: raise ValueError("'distinct_child_types' is " "incompatible with 'manual_cls'") values_getter = getattr(cls, '_value_equality_values_', None) if values_getter is None: raise TypeError('The @cirq.value_equality decorator requires a ' '_value_equality_values_ method to be defined.') if distinct_child_types: setattr(cls, '_value_equality_values_cls_', lambda self: type(self)) elif manual_cls: cls_getter = getattr(cls, '_value_equality_values_cls_', None) if cls_getter is None: raise TypeError('The @cirq.value_equality decorator requires a ' '_value_equality_values_cls_ method to be defined ' 'when "manual_cls" is set.') else: setattr(cls, '_value_equality_values_cls_', lambda self: cls) setattr(cls, '__hash__', None if unhashable else _value_equality_hash) setattr(cls, '__eq__', _value_equality_eq) setattr(cls, '__ne__', _value_equality_ne) if approximate: if not hasattr(cls, '_value_equality_approximate_values_'): setattr(cls, '_value_equality_approximate_values_', values_getter) setattr(cls, '_approx_eq_', _value_equality_approx_eq) return cls
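A minimal usage sketch of the decorator defined in the Response above (not part of the original record): the decorated class only has to supply _value_equality_values_, and the decorator wires __eq__, __ne__ and __hash__ up from it.

@value_equality
class Point2D:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def _value_equality_values_(self):
        # The tuple that defines equality and hashing for this class.
        return (self.x, self.y)

assert Point2D(1, 2) == Point2D(1, 2)              # same values -> equal
assert hash(Point2D(1, 2)) == hash(Point2D(1, 2))  # and same hash
assert Point2D(1, 2) != Point2D(2, 1)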
def alexnet(pretrained=False, **kwargs): r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = AlexNet(**kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['alexnet'])) return model
r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Below is the instruction that describes the task: ### Input: r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet ### Response: def alexnet(pretrained=False, **kwargs): r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = AlexNet(**kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['alexnet'])) return model
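A hedged usage sketch (assumes torchvision, where this constructor lives; pretrained=True would download the ImageNet weights referenced in the Response):

import torch
from torchvision.models import alexnet  # same factory as in the record

model = alexnet(pretrained=False)
model.eval()
dummy = torch.randn(1, 3, 224, 224)      # AlexNet expects 224x224 RGB input
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)                       # torch.Size([1, 1000])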
def connection(self): """ Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials. """ ctx = stack.top if ctx is None: raise Exception("Working outside of the Flask application " "context. If you wish to make a connection outside of a flask" " application context, please handle your connections " "and use manager.make_connection()") if hasattr(ctx, 'ldap3_manager_main_connection'): return ctx.ldap3_manager_main_connection else: connection = self._make_connection( bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'), contextualise=False ) connection.bind() if ctx is not None: ctx.ldap3_manager_main_connection = connection return connection
Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials.
Below is the instruction that describes the task: ### Input: Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials. ### Response: def connection(self): """ Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials. """ ctx = stack.top if ctx is None: raise Exception("Working outside of the Flask application " "context. If you wish to make a connection outside of a flask" " application context, please handle your connections " "and use manager.make_connection()") if hasattr(ctx, 'ldap3_manager_main_connection'): return ctx.ldap3_manager_main_connection else: connection = self._make_connection( bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'), contextualise=False ) connection.bind() if ctx is not None: ctx.ldap3_manager_main_connection = connection return connection
def cmd(send, msg, _): """Tells you what acronyms mean. Syntax: {command} <term> """ try: answer = subprocess.check_output(['wtf', msg], stderr=subprocess.STDOUT) send(answer.decode().strip().replace('\n', ' or ').replace('fuck', 'fsck')) except subprocess.CalledProcessError as ex: send(ex.output.decode().rstrip().splitlines()[0])
Tells you what acronyms mean. Syntax: {command} <term>
Below is the instruction that describes the task: ### Input: Tells you what acronyms mean. Syntax: {command} <term> ### Response: def cmd(send, msg, _): """Tells you what acronyms mean. Syntax: {command} <term> """ try: answer = subprocess.check_output(['wtf', msg], stderr=subprocess.STDOUT) send(answer.decode().strip().replace('\n', ' or ').replace('fuck', 'fsck')) except subprocess.CalledProcessError as ex: send(ex.output.decode().rstrip().splitlines()[0])
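A hedged invocation sketch: the handler shells out to the BSD `wtf` acronym utility, which must be installed; `send` can be any callable that receives the reply string, so plain print works for a dry run.

cmd(print, "ianal", None)   # prints something like "IANAL: i am not a lawyer"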
def thread_pool(*workers, results=None, end_of_queue=EndOfQueue): """Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull| """ if results is None: results = Queue(end_of_queue=end_of_queue) count = thread_counter(results.close) @pull def thread_pool_results(source): for worker in workers: t = threading.Thread( target=count(patch), args=(pull(source) >> worker, results.sink), daemon=True) t.start() yield from results.source() return thread_pool_results
Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull|
Below is the instruction that describes the task: ### Input: Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull| ### Response: def thread_pool(*workers, results=None, end_of_queue=EndOfQueue): """Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull| """ if results is None: results = Queue(end_of_queue=end_of_queue) count = thread_counter(results.close) @pull def thread_pool_results(source): for worker in workers: t = threading.Thread( target=count(patch), args=(pull(source) >> worker, results.sink), daemon=True) t.start() yield from results.source() return thread_pool_results
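The Response relies on this library's |pull|/|Queue| plumbing. As a rough, library-free sketch of the same pattern (worker threads pulling from one shared source, pushing results to a single queue, and an end-of-queue signal once all workers finish), using only the standard library:

import threading
import queue

def stdlib_thread_pool(source, workers):
    results = queue.Queue()
    lock = threading.Lock()
    it = iter(source)
    _END = object()
    alive = [len(workers)]

    def run(fn):
        while True:
            with lock:                     # plain iterators are not thread-safe
                item = next(it, _END)
            if item is _END:
                break
            results.put(fn(item))
        with lock:
            alive[0] -= 1
            if alive[0] == 0:
                results.put(_END)          # end-of-queue once all workers stop

    for fn in workers:
        threading.Thread(target=run, args=(fn,), daemon=True).start()
    while True:
        r = results.get()
        if r is _END:
            return
        yield r

print(sorted(stdlib_thread_pool(range(5), [lambda x: x * x] * 2)))  # [0, 1, 4, 9, 16]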
def start_fileoutput (self): """Start output to configured file.""" path = os.path.dirname(self.filename) try: if path and not os.path.isdir(path): os.makedirs(path) self.fd = self.create_fd() self.close_fd = True except IOError: msg = sys.exc_info()[1] log.warn(LOG_CHECK, "Could not open file %r for writing: %s\n" "Disabling log output of %s", self.filename, msg, self) self.fd = dummy.Dummy() self.is_active = False self.filename = None
Start output to configured file.
Below is the instruction that describes the task: ### Input: Start output to configured file. ### Response: def start_fileoutput (self): """Start output to configured file.""" path = os.path.dirname(self.filename) try: if path and not os.path.isdir(path): os.makedirs(path) self.fd = self.create_fd() self.close_fd = True except IOError: msg = sys.exc_info()[1] log.warn(LOG_CHECK, "Could not open file %r for writing: %s\n" "Disabling log output of %s", self.filename, msg, self) self.fd = dummy.Dummy() self.is_active = False self.filename = None
def set_converter(self, file_format, converter): """ Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register """ self._converters[file_format.name] = (file_format, converter)
Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register
Below is the instruction that describes the task: ### Input: Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register ### Response: def set_converter(self, file_format, converter): """ Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register """ self._converters[file_format.name] = (file_format, converter)
def set_hash(self, algo, digest): """Set algorithm ID and hexadecimal digest for next operation.""" self.algo = algo self.digest = digest
Set algorithm ID and hexadecimal digest for next operation.
Below is the instruction that describes the task: ### Input: Set algorithm ID and hexadecimal digest for next operation. ### Response: def set_hash(self, algo, digest): """Set algorithm ID and hexadecimal digest for next operation.""" self.algo = algo self.digest = digest
def start_evaluating(self, n: Node, s: ShExJ.shapeExpr) -> Optional[bool]: """Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed """ if not s.id: s.id = str(BNode()) # Random permanent id key = (n, s.id) # We only evaluate a node once if key in self.known_results: return self.known_results[key] if key not in self.evaluating: self.evaluating.add(key) return None elif key not in self.assumptions: self.assumptions[key] = True return self.assumptions[key]
Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed
Below is the instruction that describes the task: ### Input: Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed ### Response: def start_evaluating(self, n: Node, s: ShExJ.shapeExpr) -> Optional[bool]: """Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed """ if not s.id: s.id = str(BNode()) # Random permanent id key = (n, s.id) # We only evaluate a node once if key in self.known_results: return self.known_results[key] if key not in self.evaluating: self.evaluating.add(key) return None elif key not in self.assumptions: self.assumptions[key] = True return self.assumptions[key]
def on_headers(self, response, exc=None): '''Websocket upgrade as ``on_headers`` event.''' if response.status_code == 101: connection = response.connection request = response.request handler = request.websocket_handler if not handler: handler = WS() parser = request.client.frame_parser(kind=1) consumer = partial(WebSocketClient.create, response, handler, parser) connection.upgrade(consumer) response.event('post_request').fire() websocket = connection.current_consumer() response.request_again = lambda r: websocket
Websocket upgrade as ``on_headers`` event.
Below is the instruction that describes the task: ### Input: Websocket upgrade as ``on_headers`` event. ### Response: def on_headers(self, response, exc=None): '''Websocket upgrade as ``on_headers`` event.''' if response.status_code == 101: connection = response.connection request = response.request handler = request.websocket_handler if not handler: handler = WS() parser = request.client.frame_parser(kind=1) consumer = partial(WebSocketClient.create, response, handler, parser) connection.upgrade(consumer) response.event('post_request').fire() websocket = connection.current_consumer() response.request_again = lambda r: websocket
def center(a: Union[Set["Point2"], List["Point2"]]) -> "Point2": """ Returns the central point for points in list """ s = Point2((0, 0)) for p in a: s += p return s / len(a)
Returns the central point for points in list
Below is the instruction that describes the task: ### Input: Returns the central point for points in list ### Response: def center(a: Union[Set["Point2"], List["Point2"]]) -> "Point2": """ Returns the central point for points in list """ s = Point2((0, 0)) for p in a: s += p return s / len(a)
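A hedged usage sketch (assumes python-sc2's Point2, a tuple subclass supporting elementwise + and /, as used in the Response):

from sc2.position import Point2

pts = [Point2((0, 0)), Point2((4, 0)), Point2((2, 6))]
print(center(pts))   # Point2((2, 2)) -- the arithmetic mean of the points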
def have_same_affine(one_img, another_img, only_check_3d=False): """Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError """ img1 = check_img(one_img) img2 = check_img(another_img) ndim1 = len(img1.shape) ndim2 = len(img2.shape) if ndim1 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1)) if ndim2 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2)) affine1 = img1.get_affine() affine2 = img2.get_affine() if only_check_3d: affine1 = affine1[:3, :3] affine2 = affine2[:3, :3] try: return np.allclose(affine1, affine2) except ValueError: return False except: raise
Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError
Below is the instruction that describes the task: ### Input: Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError ### Response: def have_same_affine(one_img, another_img, only_check_3d=False): """Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError """ img1 = check_img(one_img) img2 = check_img(another_img) ndim1 = len(img1.shape) ndim2 = len(img2.shape) if ndim1 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1)) if ndim2 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2)) affine1 = img1.get_affine() affine2 = img2.get_affine() if only_check_3d: affine1 = affine1[:3, :3] affine2 = affine2[:3, :3] try: return np.allclose(affine1, affine2) except ValueError: return False except: raise
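A hedged usage sketch (assumes nibabel and numpy; check_img in the Response is assumed to accept nibabel images unchanged, and get_affine is the older nibabel API):

import numpy as np
import nibabel as nib

img_a = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))
img_b = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))
print(have_same_affine(img_a, img_b))   # True -- identical 4x4 affines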
def dskobj(dsk): """ Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell """ dsk = stypes.stringToCharP(dsk) bodids = stypes.SPICEINT_CELL(10000) libspice.dskobj_c(dsk, ctypes.byref(bodids)) return bodids
Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell
Below is the instruction that describes the task: ### Input: Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell ### Response: def dskobj(dsk): """ Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell """ dsk = stypes.stringToCharP(dsk) bodids = stypes.SPICEINT_CELL(10000) libspice.dskobj_c(dsk, ctypes.byref(bodids)) return bodids
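A hedged usage sketch (requires SpiceyPy, which ships this wrapper, and a local DSK kernel; the filename below is a hypothetical placeholder):

import spiceypy

ids = spiceypy.dskobj('phobos_example.bds')          # hypothetical DSK kernel file
print([ids[i] for i in range(spiceypy.card(ids))])   # body ID codes found in the file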
def network_to_pandas_hdf5(network, filename, rm_nodes=None): """ Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network. """ if rm_nodes is not None: nodes, edges = remove_nodes(network, rm_nodes) else: nodes, edges = network.nodes_df, network.edges_df with pd.HDFStore(filename, mode='w') as store: store['nodes'] = nodes store['edges'] = edges store['two_way'] = pd.Series([network._twoway]) store['impedance_names'] = pd.Series(network.impedance_names)
Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network.
Below is the instruction that describes the task: ### Input: Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network. ### Response: def network_to_pandas_hdf5(network, filename, rm_nodes=None): """ Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network. """ if rm_nodes is not None: nodes, edges = remove_nodes(network, rm_nodes) else: nodes, edges = network.nodes_df, network.edges_df with pd.HDFStore(filename, mode='w') as store: store['nodes'] = nodes store['edges'] = edges store['two_way'] = pd.Series([network._twoway]) store['impedance_names'] = pd.Series(network.impedance_names)
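A hedged round-trip sketch: `net` is assumed to be an existing pandana.Network built elsewhere; reading the store back uses only standard pandas calls, mirroring the keys written above.

import pandas as pd

network_to_pandas_hdf5(net, 'net.h5')       # `net` assumed built elsewhere
with pd.HDFStore('net.h5') as store:
    print(store['nodes'].head())
    print(bool(store['two_way'][0]), list(store['impedance_names']))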
def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' saltenv = load['saltenv'] if saltenv == 'base': saltenv = 'trunk' ret = {} relpath = fnd['rel'] path = fnd['path'] # If the file doesn't exist, we can't get a hash if not path or not os.path.isfile(path): return ret # Set the hash_type as it is determined by config ret['hash_type'] = __opts__['hash_type'] # Check if the hash is cached # Cache file's contents should be "hash:mtime" cache_path = os.path.join(__opts__['cachedir'], 'svnfs', 'hash', saltenv, '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) # If we have a cache, serve that if the mtime hasn't changed if os.path.exists(cache_path): with salt.utils.files.fopen(cache_path, 'r') as fp_: hsum, mtime = fp_.read().split(':') if os.path.getmtime(path) == float(mtime): # check if mtime changed ret['hsum'] = hsum return ret # if we don't have a cache entry -- let's make one ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) cache_dir = os.path.dirname(cache_path) # make cache directory if it doesn't exist if not os.path.exists(cache_dir): os.makedirs(cache_dir) # save the cache object "hash:mtime" with salt.utils.files.fopen(cache_path, 'w') as fp_: fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))) return ret
Return a file hash, the hash type is set in the master config file
Below is the instruction that describes the task: ### Input: Return a file hash, the hash type is set in the master config file ### Response: def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' saltenv = load['saltenv'] if saltenv == 'base': saltenv = 'trunk' ret = {} relpath = fnd['rel'] path = fnd['path'] # If the file doesn't exist, we can't get a hash if not path or not os.path.isfile(path): return ret # Set the hash_type as it is determined by config ret['hash_type'] = __opts__['hash_type'] # Check if the hash is cached # Cache file's contents should be "hash:mtime" cache_path = os.path.join(__opts__['cachedir'], 'svnfs', 'hash', saltenv, '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) # If we have a cache, serve that if the mtime hasn't changed if os.path.exists(cache_path): with salt.utils.files.fopen(cache_path, 'r') as fp_: hsum, mtime = fp_.read().split(':') if os.path.getmtime(path) == float(mtime): # check if mtime changed ret['hsum'] = hsum return ret # if we don't have a cache entry -- let's make one ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) cache_dir = os.path.dirname(cache_path) # make cache directory if it doesn't exist if not os.path.exists(cache_dir): os.makedirs(cache_dir) # save the cache object "hash:mtime" with salt.utils.files.fopen(cache_path, 'w') as fp_: fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))) return ret
def _mof_escaped(strvalue): # Note: This is a raw docstring because it shows many backslashes, and # that avoids having to double them. r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (carriage return) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F. """ escaped_str = strvalue # Escape backslash (\) escaped_str = escaped_str.replace('\\', '\\\\') # Escape \b, \t, \n, \f, \r # Note, the Python escape sequences happen to be the same as in MOF escaped_str = escaped_str.\ replace('\b', '\\b').\ replace('\t', '\\t').\ replace('\n', '\\n').\ replace('\f', '\\f').\ replace('\r', '\\r') # Escape remaining control characters (U+0001...U+001F), skipping # U+0008, U+0009, U+000A, U+000C, U+000D that are already handled. # We hard code it to be faster, plus we can easily skip already handled # chars. 
# The generic code would be (not skipping already handled chars): # for cp in range(1, 32): # c = six.unichr(cp) # esc = '\\x{0:04X}'.format(cp) # escaped_str = escaped_str.replace(c, esc) escaped_str = escaped_str.\ replace(u'\u0001', '\\x0001').\ replace(u'\u0002', '\\x0002').\ replace(u'\u0003', '\\x0003').\ replace(u'\u0004', '\\x0004').\ replace(u'\u0005', '\\x0005').\ replace(u'\u0006', '\\x0006').\ replace(u'\u0007', '\\x0007').\ replace(u'\u000B', '\\x000B').\ replace(u'\u000E', '\\x000E').\ replace(u'\u000F', '\\x000F').\ replace(u'\u0010', '\\x0010').\ replace(u'\u0011', '\\x0011').\ replace(u'\u0012', '\\x0012').\ replace(u'\u0013', '\\x0013').\ replace(u'\u0014', '\\x0014').\ replace(u'\u0015', '\\x0015').\ replace(u'\u0016', '\\x0016').\ replace(u'\u0017', '\\x0017').\ replace(u'\u0018', '\\x0018').\ replace(u'\u0019', '\\x0019').\ replace(u'\u001A', '\\x001A').\ replace(u'\u001B', '\\x001B').\ replace(u'\u001C', '\\x001C').\ replace(u'\u001D', '\\x001D').\ replace(u'\u001E', '\\x001E').\ replace(u'\u001F', '\\x001F') # Escape single and double quote escaped_str = escaped_str.replace('"', '\\"') escaped_str = escaped_str.replace("'", "\\'") return escaped_str
r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (newline) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F.
Below is the instruction that describes the task: ### Input: r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (carriage return) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F. ### Response: def _mof_escaped(strvalue): # Note: This is a raw docstring because it shows many backslashes, and # that avoids having to double them. r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (carriage return) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. 
The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F. """ escaped_str = strvalue # Escape backslash (\) escaped_str = escaped_str.replace('\\', '\\\\') # Escape \b, \t, \n, \f, \r # Note, the Python escape sequences happen to be the same as in MOF escaped_str = escaped_str.\ replace('\b', '\\b').\ replace('\t', '\\t').\ replace('\n', '\\n').\ replace('\f', '\\f').\ replace('\r', '\\r') # Escape remaining control characters (U+0001...U+001F), skipping # U+0008, U+0009, U+000A, U+000C, U+000D that are already handled. # We hard code it to be faster, plus we can easily skip already handled # chars. # The generic code would be (not skipping already handled chars): # for cp in range(1, 32): # c = six.unichr(cp) # esc = '\\x{0:04X}'.format(cp) # escaped_str = escaped_str.replace(c, esc) escaped_str = escaped_str.\ replace(u'\u0001', '\\x0001').\ replace(u'\u0002', '\\x0002').\ replace(u'\u0003', '\\x0003').\ replace(u'\u0004', '\\x0004').\ replace(u'\u0005', '\\x0005').\ replace(u'\u0006', '\\x0006').\ replace(u'\u0007', '\\x0007').\ replace(u'\u000B', '\\x000B').\ replace(u'\u000E', '\\x000E').\ replace(u'\u000F', '\\x000F').\ replace(u'\u0010', '\\x0010').\ replace(u'\u0011', '\\x0011').\ replace(u'\u0012', '\\x0012').\ replace(u'\u0013', '\\x0013').\ replace(u'\u0014', '\\x0014').\ replace(u'\u0015', '\\x0015').\ replace(u'\u0016', '\\x0016').\ replace(u'\u0017', '\\x0017').\ replace(u'\u0018', '\\x0018').\ replace(u'\u0019', '\\x0019').\ replace(u'\u001A', '\\x001A').\ replace(u'\u001B', '\\x001B').\ replace(u'\u001C', '\\x001C').\ replace(u'\u001D', '\\x001D').\ replace(u'\u001E', '\\x001E').\ replace(u'\u001F', '\\x001F') # Escape single and double quote escaped_str = escaped_str.replace('"', '\\"') escaped_str = escaped_str.replace("'", "\\'") return escaped_str
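A quick demonstration of the escaping behavior, runnable against the Response's function (pure Python, no extra assumptions):

raw = 'He said "hi",\tthen left\n'
print(_mof_escaped(raw))
# -> He said \"hi\",\tthen left\n   (tab and newline become literal \t and \n sequences)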
def update(self, pop: Union[pd.DataFrame, pd.Series]): """Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name. """ if not pop.empty: if isinstance(pop, pd.Series): if pop.name in self._columns: affected_columns = [pop.name] elif len(self._columns) == 1: affected_columns = self._columns else: raise PopulationError('Cannot update with a Series unless the series name equals a column ' 'name or there is only a single column in the view') else: affected_columns = set(pop.columns) affected_columns = set(affected_columns).intersection(self._columns) state_table = self.manager.get_population(True) if not self.manager.growing: affected_columns = set(affected_columns).intersection(state_table.columns) for c in affected_columns: if c in state_table: v = state_table[c].values if isinstance(pop, pd.Series): v2 = pop.values else: v2 = pop[c].values v[pop.index] = v2 if v.dtype != v2.dtype: # This happens when the population is being grown because extending # the index forces columns that don't have a natural null type # to become 'object' if not self.manager.growing: raise PopulationError('Component corrupting population table. ' 'Old column type: {} New column type: {}'.format(v.dtype, v2.dtype)) v = v.astype(v2.dtype) else: if isinstance(pop, pd.Series): v = pop.values else: v = pop[c].values self.manager._population[c] = v
Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name.
Below is the instruction that describes the task: ### Input: Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name. ### Response: def update(self, pop: Union[pd.DataFrame, pd.Series]): """Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name. """ if not pop.empty: if isinstance(pop, pd.Series): if pop.name in self._columns: affected_columns = [pop.name] elif len(self._columns) == 1: affected_columns = self._columns else: raise PopulationError('Cannot update with a Series unless the series name equals a column ' 'name or there is only a single column in the view') else: affected_columns = set(pop.columns) affected_columns = set(affected_columns).intersection(self._columns) state_table = self.manager.get_population(True) if not self.manager.growing: affected_columns = set(affected_columns).intersection(state_table.columns) for c in affected_columns: if c in state_table: v = state_table[c].values if isinstance(pop, pd.Series): v2 = pop.values else: v2 = pop[c].values v[pop.index] = v2 if v.dtype != v2.dtype: # This happens when the population is being grown because extending # the index forces columns that don't have a natural null type # to become 'object' if not self.manager.growing: raise PopulationError('Component corrupting population table. ' 'Old column type: {} New column type: {}'.format(v.dtype, v2.dtype)) v = v.astype(v2.dtype) else: if isinstance(pop, pd.Series): v = pop.values else: v = pop[c].values self.manager._population[c] = v
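A standalone pandas sketch of the core write pattern the Response uses: updating only the rows named in the incoming index, through the column's underlying numpy array.

import pandas as pd

state = pd.DataFrame({'age': [10.0, 20.0, 30.0]})
pop = pd.Series([21.0, 31.0], index=[1, 2], name='age')

v = state['age'].values          # view into the column's underlying array
v[pop.index] = pop.values        # write only rows 1 and 2
print(state['age'].tolist())     # [10.0, 21.0, 31.0]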
def createL4L2Column(network, networkConfig, suffix=""): """ Create a single column containing one L4 and one L2. networkConfig is a dict that must contain the following keys (additional keys ok): { "enableFeedback": True, "externalInputSize": 1024, "sensorInputSize": 1024, "L4RegionType": "py.ApicalTMPairRegion", "L4Params": { <constructor parameters for the L4 region> }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } Region names are externalInput, sensorInput, L4Column, and ColumnPoolerRegion. Each name has an optional string suffix appended to it. Configuration options: "lateralSPParams" and "feedForwardSPParams" are optional. If included appropriate spatial pooler regions will be added to the network. If externalInputSize is 0, the externalInput sensor (and SP if appropriate) will NOT be created. In this case it is expected that L4 is a sequence memory region (e.g. ApicalTMSequenceRegion) """ externalInputName = "externalInput" + suffix sensorInputName = "sensorInput" + suffix L4ColumnName = "L4Column" + suffix L2ColumnName = "L2Column" + suffix L4Params = copy.deepcopy(networkConfig["L4Params"]) L4Params["basalInputWidth"] = networkConfig["externalInputSize"] L4Params["apicalInputWidth"] = networkConfig["L2Params"]["cellCount"] if networkConfig["externalInputSize"] > 0: network.addRegion( externalInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["externalInputSize"]})) network.addRegion( sensorInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["sensorInputSize"]})) # Fixup network to include SP, if defined in networkConfig if networkConfig["externalInputSize"] > 0: _addLateralSPRegion(network, networkConfig, suffix) _addFeedForwardSPRegion(network, networkConfig, suffix) network.addRegion( L4ColumnName, networkConfig["L4RegionType"], json.dumps(L4Params)) network.addRegion( L2ColumnName, "py.ColumnPoolerRegion", json.dumps(networkConfig["L2Params"])) # Set phases appropriately so regions are executed in the proper sequence # This is required when we create multiple columns - the order of execution # is not the same as the order of region creation. 
if networkConfig["externalInputSize"] > 0: network.setPhases(externalInputName,[0]) network.setPhases(sensorInputName,[0]) _setLateralSPPhases(network, networkConfig) _setFeedForwardSPPhases(network, networkConfig) # L4 and L2 regions always have phases 2 and 3, respectively network.setPhases(L4ColumnName,[2]) network.setPhases(L2ColumnName,[3]) # Link SP region(s), if applicable if networkConfig["externalInputSize"] > 0: _linkLateralSPRegion(network, networkConfig, externalInputName, L4ColumnName) _linkFeedForwardSPRegion(network, networkConfig, sensorInputName, L4ColumnName) # Link L4 to L2 network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="activeCells", destInput="feedforwardInput") network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="predictedActiveCells", destInput="feedforwardGrowthCandidates") # Link L2 feedback to L4 if networkConfig.get("enableFeedback", True): network.link(L2ColumnName, L4ColumnName, "UniformLink", "", srcOutput="feedForwardOutput", destInput="apicalInput", propagationDelay=1) # Link reset output to L2 and L4 network.link(sensorInputName, L2ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") network.link(sensorInputName, L4ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") enableProfiling(network) return network
Create a single column containing one L4 and one L2. networkConfig is a dict that must contain the following keys (additional keys ok): { "enableFeedback": True, "externalInputSize": 1024, "sensorInputSize": 1024, "L4RegionType": "py.ApicalTMPairRegion", "L4Params": { <constructor parameters for the L4 region> }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } Region names are externalInput, sensorInput, L4Column, and ColumnPoolerRegion. Each name has an optional string suffix appended to it. Configuration options: "lateralSPParams" and "feedForwardSPParams" are optional. If included appropriate spatial pooler regions will be added to the network. If externalInputSize is 0, the externalInput sensor (and SP if appropriate) will NOT be created. In this case it is expected that L4 is a sequence memory region (e.g. ApicalTMSequenceRegion)
Below is the instruction that describes the task: ### Input: Create a single column containing one L4 and one L2. networkConfig is a dict that must contain the following keys (additional keys ok): { "enableFeedback": True, "externalInputSize": 1024, "sensorInputSize": 1024, "L4RegionType": "py.ApicalTMPairRegion", "L4Params": { <constructor parameters for the L4 region> }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } Region names are externalInput, sensorInput, L4Column, and ColumnPoolerRegion. Each name has an optional string suffix appended to it. Configuration options: "lateralSPParams" and "feedForwardSPParams" are optional. If included appropriate spatial pooler regions will be added to the network. If externalInputSize is 0, the externalInput sensor (and SP if appropriate) will NOT be created. In this case it is expected that L4 is a sequence memory region (e.g. ApicalTMSequenceRegion) ### Response: def createL4L2Column(network, networkConfig, suffix=""): """ Create a single column containing one L4 and one L2. networkConfig is a dict that must contain the following keys (additional keys ok): { "enableFeedback": True, "externalInputSize": 1024, "sensorInputSize": 1024, "L4RegionType": "py.ApicalTMPairRegion", "L4Params": { <constructor parameters for the L4 region> }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } Region names are externalInput, sensorInput, L4Column, and ColumnPoolerRegion. Each name has an optional string suffix appended to it. Configuration options: "lateralSPParams" and "feedForwardSPParams" are optional. If included appropriate spatial pooler regions will be added to the network. If externalInputSize is 0, the externalInput sensor (and SP if appropriate) will NOT be created. In this case it is expected that L4 is a sequence memory region (e.g. ApicalTMSequenceRegion) """ externalInputName = "externalInput" + suffix sensorInputName = "sensorInput" + suffix L4ColumnName = "L4Column" + suffix L2ColumnName = "L2Column" + suffix L4Params = copy.deepcopy(networkConfig["L4Params"]) L4Params["basalInputWidth"] = networkConfig["externalInputSize"] L4Params["apicalInputWidth"] = networkConfig["L2Params"]["cellCount"] if networkConfig["externalInputSize"] > 0: network.addRegion( externalInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["externalInputSize"]})) network.addRegion( sensorInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["sensorInputSize"]})) # Fixup network to include SP, if defined in networkConfig if networkConfig["externalInputSize"] > 0: _addLateralSPRegion(network, networkConfig, suffix) _addFeedForwardSPRegion(network, networkConfig, suffix) network.addRegion( L4ColumnName, networkConfig["L4RegionType"], json.dumps(L4Params)) network.addRegion( L2ColumnName, "py.ColumnPoolerRegion", json.dumps(networkConfig["L2Params"])) # Set phases appropriately so regions are executed in the proper sequence # This is required when we create multiple columns - the order of execution # is not the same as the order of region creation. 
if networkConfig["externalInputSize"] > 0: network.setPhases(externalInputName,[0]) network.setPhases(sensorInputName,[0]) _setLateralSPPhases(network, networkConfig) _setFeedForwardSPPhases(network, networkConfig) # L4 and L2 regions always have phases 2 and 3, respectively network.setPhases(L4ColumnName,[2]) network.setPhases(L2ColumnName,[3]) # Link SP region(s), if applicable if networkConfig["externalInputSize"] > 0: _linkLateralSPRegion(network, networkConfig, externalInputName, L4ColumnName) _linkFeedForwardSPRegion(network, networkConfig, sensorInputName, L4ColumnName) # Link L4 to L2 network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="activeCells", destInput="feedforwardInput") network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="predictedActiveCells", destInput="feedforwardGrowthCandidates") # Link L2 feedback to L4 if networkConfig.get("enableFeedback", True): network.link(L2ColumnName, L4ColumnName, "UniformLink", "", srcOutput="feedForwardOutput", destInput="apicalInput", propagationDelay=1) # Link reset output to L2 and L4 network.link(sensorInputName, L2ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") network.link(sensorInputName, L4ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") enableProfiling(network) return network
def parse_from_string( root_processor, # type: RootProcessor xml_string # type: Text ): # type: (...) -> Any """ Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file` """ if not _is_valid_root_processor(root_processor): raise InvalidRootProcessor('Invalid root processor') parseable_xml_string = xml_string # type: Union[Text, bytes] if _PY2 and isinstance(xml_string, Text): parseable_xml_string = xml_string.encode('utf-8') root = ET.fromstring(parseable_xml_string) _xml_namespace_strip(root) state = _ProcessorState() state.push_location(root_processor.element_path) return root_processor.parse_at_root(root, state)
Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file`
Below is the instruction that describes the task: ### Input: Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file` ### Response: def parse_from_string( root_processor, # type: RootProcessor xml_string # type: Text ): # type: (...) -> Any """ Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file` """ if not _is_valid_root_processor(root_processor): raise InvalidRootProcessor('Invalid root processor') parseable_xml_string = xml_string # type: Union[Text, bytes] if _PY2 and isinstance(xml_string, Text): parseable_xml_string = xml_string.encode('utf-8') root = ET.fromstring(parseable_xml_string) _xml_namespace_strip(root) state = _ProcessorState() state.push_location(root_processor.element_path) return root_processor.parse_at_root(root, state)
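A hedged usage sketch, assuming declxml's processor-building helpers (xml.dictionary, xml.string, xml.integer), the library this function belongs to:

import declxml as xml

book = xml.dictionary('book', [
    xml.string('title'),
    xml.integer('year'),
])
print(xml.parse_from_string(book, '<book><title>Dune</title><year>1965</year></book>'))
# -> {'title': 'Dune', 'year': 1965}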
def post(self, url: str, data: str, expected_status_code=201): """ Do a POST request """ r = requests.post(self._format_url(url), json=data, headers=self.headers, timeout=TIMEOUT) self._check_response(r, expected_status_code) return r.json()
Do a POST request
Below is the instruction that describes the task: ### Input: Do a POST request ### Response: def post(self, url: str, data: str, expected_status_code=201): """ Do a POST request """ r = requests.post(self._format_url(url), json=data, headers=self.headers, timeout=TIMEOUT) self._check_response(r, expected_status_code) return r.json()
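A hedged usage sketch; `ApiClient` is a hypothetical wrapper class exposing this method, with `_format_url`, `headers`, and `_check_response` already configured elsewhere:

client = ApiClient(base_url="https://api.example.com", token="...")  # hypothetical class
created = client.post("/items", data={"name": "widget"})  # expects HTTP 201 back
print(created)   # parsed JSON body of the response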
def unregister_presence_callback(self, type_, from_): """ Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` argument (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead. """ type_ = self._coerce_enum(type_, structs.PresenceType) warnings.warn( "unregister_presence_callback is deprecated; use " "aioxmpp.dispatcher.SimplePresenceDispatcher or " "aioxmpp.PresenceClient instead", DeprecationWarning, stacklevel=2 ) self._xxx_presence_dispatcher.unregister_callback( type_, from_, )
Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead.
Below is the the instruction that describes the task: ### Input: Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead. ### Response: def unregister_presence_callback(self, type_, from_): """ Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead. """ type_ = self._coerce_enum(type_, structs.PresenceType) warnings.warn( "unregister_presence_callback is deprecated; use " "aioxmpp.dispatcher.SimplePresenceDispatcher or " "aioxmpp.PresenceClient instead", DeprecationWarning, stacklevel=2 ) self._xxx_presence_dispatcher.unregister_callback( type_, from_, )
def create_app(cls, port): """Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer """ app = FrontEndApp() app_server = GeventServer(app, port=port, hostname='0.0.0.0') return app_server
Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer
Below is the the instruction that describes the task: ### Input: Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer ### Response: def create_app(cls, port): """Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer """ app = FrontEndApp() app_server = GeventServer(app, port=port, hostname='0.0.0.0') return app_server
def new_text_cell(text=None): """Create a new text cell.""" cell = NotebookNode() if text is not None: cell.text = unicode(text) cell.cell_type = u'text' return cell
Create a new text cell.
Below is the the instruction that describes the task: ### Input: Create a new text cell. ### Response: def new_text_cell(text=None): """Create a new text cell.""" cell = NotebookNode() if text is not None: cell.text = unicode(text) cell.cell_type = u'text' return cell
def __similarity(s1, s2, ngrams_fn, n=3):
    """
    The similarity of two sequences, as the Dice coefficient of their
    n-gram sets

    Args:
        s1: a string
        s2: another string
        ngrams_fn: a function that returns the n-grams of a sequence
        n: an int for the n in n-gram

    Returns:
        float: the Dice coefficient, 2 * |matches| / (|ngrams1| + |ngrams2|)
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
The similarity of two sequences, as the Dice coefficient of their
n-gram sets

Args:
    s1: a string
    s2: another string
    ngrams_fn: a function that returns the n-grams of a sequence
    n: an int for the n in n-gram

Returns:
    float: the Dice coefficient, 2 * |matches| / (|ngrams1| + |ngrams2|)
Below is the the instruction that describes the task:
### Input:
The similarity of two sequences, as the Dice coefficient of their
n-gram sets

Args:
    s1: a string
    s2: another string
    ngrams_fn: a function that returns the n-grams of a sequence
    n: an int for the n in n-gram

Returns:
    float: the Dice coefficient, 2 * |matches| / (|ngrams1| + |ngrams2|)
### Response:
def __similarity(s1, s2, ngrams_fn, n=3):
    """
    The similarity of two sequences, as the Dice coefficient of their
    n-gram sets

    Args:
        s1: a string
        s2: another string
        ngrams_fn: a function that returns the n-grams of a sequence
        n: an int for the n in n-gram

    Returns:
        float: the Dice coefficient, 2 * |matches| / (|ngrams1| + |ngrams2|)
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
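Editor's note: a self-contained version, with a character n-gram helper supplied for `ngrams_fn`; `char_ngrams` is a hypothetical stand-in for whatever n-gram function the surrounding module provides. With bigrams it reproduces the classic Dice-coefficient example.

def char_ngrams(s, n=3):
    # 'night', n=2 -> ['ni', 'ig', 'gh', 'ht']
    return [s[i:i + n] for i in range(len(s) - n + 1)]

def similarity(s1, s2, ngrams_fn=char_ngrams, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))

print(similarity('night', 'nacht', n=2))  # only 'ht' is shared: 2*1/8 = 0.25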
def fire(self, args, kwargs):
    """
    Fire this signal with the specified arguments and keyword arguments.

    Typically this is invoked via :meth:`__call__()` on this object, which
    is more natural as it does all the argument packing/unpacking
    transparently.
    """
    for info in self._listeners[:]:
        if info.pass_signal:
            info.listener(*args, signal=self, **kwargs)
        else:
            info.listener(*args, **kwargs)
Fire this signal with the specified arguments and keyword arguments.

Typically this is invoked via :meth:`__call__()` on this object, which
is more natural as it does all the argument packing/unpacking
transparently.
Below is the the instruction that describes the task:
### Input:
Fire this signal with the specified arguments and keyword arguments.

Typically this is invoked via :meth:`__call__()` on this object, which
is more natural as it does all the argument packing/unpacking
transparently.
### Response:
def fire(self, args, kwargs):
    """
    Fire this signal with the specified arguments and keyword arguments.

    Typically this is invoked via :meth:`__call__()` on this object, which
    is more natural as it does all the argument packing/unpacking
    transparently.
    """
    for info in self._listeners[:]:
        if info.pass_signal:
            info.listener(*args, signal=self, **kwargs)
        else:
            info.listener(*args, **kwargs)
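Editor's note: a runnable sketch of the class this method belongs to, under the assumption that listeners are stored as small records with a `pass_signal` flag and that `__call__` packs its arguments and delegates to `fire()`; the `connect` name and `ListenerInfo` shape are guesses, not the library's actual API.

from collections import namedtuple

ListenerInfo = namedtuple('ListenerInfo', ['listener', 'pass_signal'])

class Signal:
    def __init__(self, name):
        self.name = name
        self._listeners = []

    def connect(self, listener, pass_signal=False):
        self._listeners.append(ListenerInfo(listener, pass_signal))

    def fire(self, args, kwargs):
        # Iterate over a copy so listeners may disconnect during dispatch.
        for info in self._listeners[:]:
            if info.pass_signal:
                info.listener(*args, signal=self, **kwargs)
            else:
                info.listener(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        self.fire(args, kwargs)  # pack the arguments and delegate

on_save = Signal('on_save')
on_save.connect(lambda path: print('saved', path))
on_save('/tmp/report.txt')  # prints: saved /tmp/report.txt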
def store_memory_object(self, mo, overwrite=True):
    """
    This function optimizes a large store by storing a single reference to
    the :class:`SimMemoryObject` instead of one for each byte.

    :param mo: the memory object to store
    """
    for p in self._containing_pages_mo(mo):
        self._apply_object_to_page(p, mo, overwrite=overwrite)

    self._update_range_mappings(mo.base, mo.object, mo.length)
This function optimizes a large store by storing a single reference to
the :class:`SimMemoryObject` instead of one for each byte.

:param mo: the memory object to store
Below is the the instruction that describes the task:
### Input:
This function optimizes a large store by storing a single reference to
the :class:`SimMemoryObject` instead of one for each byte.

:param mo: the memory object to store
### Response:
def store_memory_object(self, mo, overwrite=True):
    """
    This function optimizes a large store by storing a single reference to
    the :class:`SimMemoryObject` instead of one for each byte.

    :param mo: the memory object to store
    """
    for p in self._containing_pages_mo(mo):
        self._apply_object_to_page(p, mo, overwrite=overwrite)

    self._update_range_mappings(mo.base, mo.object, mo.length)
def stop(self): """ Stop the server. """ log.debug("Stopping listeners") self.queue_lock.acquire() for s in self.listeners: log.debug("Stopping {}".format(s)) s.shutdown() s.socket.close() self.cancel_queue() for t in self.threads: t.join() self.listeners = [] self.threads = [] self.is_running = False self.queue_lock.release() log.debug("Listeners stopped and threads joined")
Stop the server.
Below is the the instruction that describes the task: ### Input: Stop the server. ### Response: def stop(self): """ Stop the server. """ log.debug("Stopping listeners") self.queue_lock.acquire() for s in self.listeners: log.debug("Stopping {}".format(s)) s.shutdown() s.socket.close() self.cancel_queue() for t in self.threads: t.join() self.listeners = [] self.threads = [] self.is_running = False self.queue_lock.release() log.debug("Listeners stopped and threads joined")
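Editor's note: the same shutdown choreography — stop accepting, close the listening sockets, cancel pending work, join the worker threads — can be reproduced with the standard library alone. A sketch using `socketserver`, which splits the socket close into its own `server_close()` call rather than `s.socket.close()`.

import socketserver
import threading

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        self.request.sendall(self.request.recv(1024))

listeners = [socketserver.ThreadingTCPServer(('127.0.0.1', 0), EchoHandler)]
threads = [threading.Thread(target=s.serve_forever) for s in listeners]
for t in threads:
    t.start()

# Mirror of stop(): unblock serve_forever(), close the listening socket,
# then wait for the worker threads to finish.
for s in listeners:
    s.shutdown()
    s.server_close()
for t in threads:
    t.join()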
def plot_eigh2(self, colorbar=True, cb_orientation='vertical',
               cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """
    Plot the second eigenvalue of the horizontal tensor.

    Usage
    -----
    x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar,
                  cb_orientation, cb_label, show, fname])

    Parameters
    ----------
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    xlabel : str, optional, default = 'longitude'
        Label for the longitude axis.
    ylabel : str, optional, default = 'latitude'
        Label for the latitude axis.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
        Text label for the colorbar.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot() and
        plt.imshow() methods.
    """
    if cb_label is None:
        cb_label = self._eigh2_label

    if self.eigh2 is None:
        self.compute_eigh()

    if ax is None:
        fig, axes = self.eigh2.plot(colorbar=colorbar,
                                    cb_orientation=cb_orientation,
                                    cb_label=cb_label, show=False,
                                    **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        self.eigh2.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                        cb_label=cb_label, ax=ax, **kwargs)
Plot the second eigenvalue of the horizontal tensor.

Usage
-----
x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar,
              cb_orientation, cb_label, show, fname])

Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
    Intervals to use when plotting the x and y ticks. If set to None,
    ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
    Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
    Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
    A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
    If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
    Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
    Text label for the colorbar.
show : bool, optional, default = True
    If True, plot the image to the screen.
fname : str, optional, default = None
    If present, and if axes is not specified, save the image to the
    specified file.
kwargs : optional
    Keyword arguments that will be sent to the SHGrid.plot() and
    plt.imshow() methods.
Below is the the instruction that describes the task:
### Input:
Plot the second eigenvalue of the horizontal tensor.

Usage
-----
x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar,
              cb_orientation, cb_label, show, fname])

Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
    Intervals to use when plotting the x and y ticks. If set to None,
    ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
    Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
    Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
    A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
    If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
    Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
    Text label for the colorbar.
show : bool, optional, default = True
    If True, plot the image to the screen.
fname : str, optional, default = None
    If present, and if axes is not specified, save the image to the
    specified file.
kwargs : optional
    Keyword arguments that will be sent to the SHGrid.plot() and
    plt.imshow() methods.
### Response:
def plot_eigh2(self, colorbar=True, cb_orientation='vertical',
               cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """
    Plot the second eigenvalue of the horizontal tensor.

    Usage
    -----
    x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar,
                  cb_orientation, cb_label, show, fname])

    Parameters
    ----------
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    xlabel : str, optional, default = 'longitude'
        Label for the longitude axis.
    ylabel : str, optional, default = 'latitude'
        Label for the latitude axis.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
        Text label for the colorbar.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot() and
        plt.imshow() methods.
    """
    if cb_label is None:
        cb_label = self._eigh2_label

    if self.eigh2 is None:
        self.compute_eigh()

    if ax is None:
        fig, axes = self.eigh2.plot(colorbar=colorbar,
                                    cb_orientation=cb_orientation,
                                    cb_label=cb_label, show=False,
                                    **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        self.eigh2.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                        cb_label=cb_label, ax=ax, **kwargs)
def objectprep(self):
    """
    Creates fastq files from an in-progress Illumina MiSeq run, or creates
    an object and moves files appropriately
    """
    # Create .fastq files if necessary. Otherwise create the metadata object
    if self.bcltofastq:
        if self.customsamplesheet:
            assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \
                .format(self.customsamplesheet)
        # Create the FASTQ files
        self.samples = fastqCreator.CreateFastq(self)
        # Create a dictionary of the object
        samples_dict = vars(self.samples)
        # Extract the required information from the dictionary
        self.index = samples_dict['index']
        self.index_length = samples_dict['indexlength']
        self.forward = samples_dict['forwardlength']
        self.reverse = samples_dict['reverselength']
        self.forwardlength = samples_dict['forward']
        self.reverselength = samples_dict['reverse']
        self.header = samples_dict['header']
    else:
        self.samples = createObject.ObjectCreation(self)
Creates fastq files from an in-progress Illumina MiSeq run, or creates
an object and moves files appropriately
Below is the the instruction that describes the task:
### Input:
Creates fastq files from an in-progress Illumina MiSeq run, or creates
an object and moves files appropriately
### Response:
def objectprep(self):
    """
    Creates fastq files from an in-progress Illumina MiSeq run, or creates
    an object and moves files appropriately
    """
    # Create .fastq files if necessary. Otherwise create the metadata object
    if self.bcltofastq:
        if self.customsamplesheet:
            assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \
                .format(self.customsamplesheet)
        # Create the FASTQ files
        self.samples = fastqCreator.CreateFastq(self)
        # Create a dictionary of the object
        samples_dict = vars(self.samples)
        # Extract the required information from the dictionary
        self.index = samples_dict['index']
        self.index_length = samples_dict['indexlength']
        self.forward = samples_dict['forwardlength']
        self.reverse = samples_dict['reverselength']
        self.forwardlength = samples_dict['forward']
        self.reverselength = samples_dict['reverse']
        self.header = samples_dict['header']
    else:
        self.samples = createObject.ObjectCreation(self)
def watch(ctx): """Automatically run build whenever a relevant file changes. """ watcher = Watcher(ctx) watcher.watch_directory( path='{pkg.source_less}', ext='.less', action=lambda e: build(ctx, less=True) ) watcher.watch_directory( path='{pkg.source_js}', ext='.jsx', action=lambda e: build(ctx, js=True) ) watcher.watch_directory( path='{pkg.docs}', ext='.rst', action=lambda e: build(ctx, docs=True) ) watcher.start()
Automatically run build whenever a relevant file changes.
Below is the the instruction that describes the task: ### Input: Automatically run build whenever a relevant file changes. ### Response: def watch(ctx): """Automatically run build whenever a relevant file changes. """ watcher = Watcher(ctx) watcher.watch_directory( path='{pkg.source_less}', ext='.less', action=lambda e: build(ctx, less=True) ) watcher.watch_directory( path='{pkg.source_js}', ext='.jsx', action=lambda e: build(ctx, js=True) ) watcher.watch_directory( path='{pkg.docs}', ext='.rst', action=lambda e: build(ctx, docs=True) ) watcher.start()
def list_theme(): """List all available Engineer themes.""" from engineer.themes import ThemeManager themes = ThemeManager.themes() col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()])) themes = ThemeManager.themes_by_finder() for finder in sorted(themes.iterkeys()): if len(themes[finder]) > 0: puts("%s: " % finder) for theme in sorted(themes[finder], key=lambda _: _.id): with indent(4): puts( columns( [colored.cyan("%s:" % theme.id), col1], [colored.white(theme.root_path, bold=True), col2] ) )
List all available Engineer themes.
Below is the the instruction that describes the task: ### Input: List all available Engineer themes. ### Response: def list_theme(): """List all available Engineer themes.""" from engineer.themes import ThemeManager themes = ThemeManager.themes() col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()])) themes = ThemeManager.themes_by_finder() for finder in sorted(themes.iterkeys()): if len(themes[finder]) > 0: puts("%s: " % finder) for theme in sorted(themes[finder], key=lambda _: _.id): with indent(4): puts( columns( [colored.cyan("%s:" % theme.id), col1], [colored.white(theme.root_path, bold=True), col2] ) )
def _expand_syntax_quote( ctx: ReaderContext, form: IterableLispForm ) -> Iterable[LispForm]: """Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form)""" expanded = [] for elem in form: if _is_unquote(elem): expanded.append(llist.l(_LIST, elem[1])) elif _is_unquote_splicing(elem): expanded.append(elem[1]) else: expanded.append(llist.l(_LIST, _process_syntax_quoted_form(ctx, elem))) return expanded
Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form)
Below is the the instruction that describes the task: ### Input: Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form) ### Response: def _expand_syntax_quote( ctx: ReaderContext, form: IterableLispForm ) -> Iterable[LispForm]: """Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form)""" expanded = [] for elem in form: if _is_unquote(elem): expanded.append(llist.l(_LIST, elem[1])) elif _is_unquote_splicing(elem): expanded.append(elem[1]) else: expanded.append(llist.l(_LIST, _process_syntax_quoted_form(ctx, elem))) return expanded
def _search(self, base, fltr, attrs=None, scope=ldap.SCOPE_SUBTREE): """Perform LDAP search""" try: results = self._conn.search_s(base, scope, fltr, attrs) except Exception as e: log.exception(self._get_ldap_msg(e)) results = False return results
Perform LDAP search
Below is the the instruction that describes the task: ### Input: Perform LDAP search ### Response: def _search(self, base, fltr, attrs=None, scope=ldap.SCOPE_SUBTREE): """Perform LDAP search""" try: results = self._conn.search_s(base, scope, fltr, attrs) except Exception as e: log.exception(self._get_ldap_msg(e)) results = False return results
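Editor's note: a hedged usage sketch with the python-ldap package, which provides the `search_s` call and `SCOPE_SUBTREE` constant used above; the server URI, bind DN, and filter are placeholders.

import ldap  # the python-ldap package

conn = ldap.initialize('ldap://ldap.example.com')  # placeholder server
conn.simple_bind_s('cn=reader,dc=example,dc=com', 'secret')

# Equivalent of self._search(base, fltr, attrs): search_s returns a list
# of (dn, attrs_dict) tuples, or raises on error.
results = conn.search_s(
    'dc=example,dc=com',   # base
    ldap.SCOPE_SUBTREE,    # scope
    '(uid=jdoe)',          # fltr
    ['cn', 'mail'],        # attrs
)
for dn, attrs in results:
    print(dn, attrs.get('mail'))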
def kick_chat_member(self, chat_id, user_id, until_date=None): """ Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message """ return apihelper.kick_chat_member(self.token, chat_id, user_id, until_date)
Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message
Below is the the instruction that describes the task: ### Input: Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message ### Response: def kick_chat_member(self, chat_id, user_id, until_date=None): """ Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message """ return apihelper.kick_chat_member(self.token, chat_id, user_id, until_date)
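Editor's note: a short usage sketch against pyTelegramBotAPI, which this wrapper belongs to; the token and ids are placeholders. Since `until_date` is a unix timestamp, a one-hour ban is just now plus 3600 seconds. Newer releases of the library rename this method `ban_chat_member`.

import time
import telebot

bot = telebot.TeleBot('123456:ABC-placeholder-token')

# Ban a (hypothetical) user from a supergroup for one hour.
bot.kick_chat_member(chat_id=-1001234567890, user_id=987654321,
                     until_date=int(time.time()) + 3600)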
def download_static_assets(doc, destination, base_url,
                           request_fn=make_request, url_blacklist=[],
                           js_middleware=None, css_middleware=None,
                           derive_filename=_derive_filename):
    """
    Download all static assets referenced from an HTML page.
    The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
    audio clips.

    Args:
        doc: The HTML page source as a string or BeautifulSoup instance.
        destination: The folder to download the static assets to!
        base_url: The base URL where assets will be downloaded from.
        request_fn: The function to be called to make requests, passed to
            ricecooker.utils.html.download_file(). Pass in a custom one for
            custom caching logic.
        url_blacklist: A list of keywords of files to not include in
            downloading. Will do substring matching, so e.g. 'acorn.js' will
            match '/some/path/to/acorn.js'.
        js_middleware: If specified, JS content will be passed into this
            callback which is expected to return JS content with any
            modifications.
        css_middleware: If specified, CSS content will be passed into this
            callback which is expected to return CSS content with any
            modifications.

    Return the modified page HTML with links rewritten to the locations of the
    downloaded static files, as a BeautifulSoup object. (Call str() on it to
    extract the raw HTML.)
    """
    if not isinstance(doc, BeautifulSoup):
        doc = BeautifulSoup(doc, "html.parser")

    # Helper function to download all assets for a given CSS selector.
    def download_assets(selector, attr, url_middleware=None,
                        content_middleware=None, node_filter=None):
        nodes = doc.select(selector)
        for i, node in enumerate(nodes):
            if node_filter:
                if not node_filter(node):
                    src = node[attr]
                    node[attr] = ''
                    print('    Skipping node with src ', src)
                    continue
            if node[attr].startswith('data:'):
                continue
            url = urljoin(base_url, node[attr])
            if _is_blacklisted(url, url_blacklist):
                print('    Skipping downloading blacklisted url', url)
                node[attr] = ""
                continue
            if url_middleware:
                url = url_middleware(url)
            filename = derive_filename(url)
            node[attr] = filename
            print("    Downloading", url, "to filename", filename)
            download_file(url, destination, request_fn=request_fn,
                          filename=filename,
                          middleware_callbacks=content_middleware)

    def js_content_middleware(content, url, **kwargs):
        if js_middleware:
            content = js_middleware(content, url, **kwargs)
        # Polyfill localStorage and document.cookie as iframes can't access
        # them
        return (content
                .replace("localStorage", "_localStorage")
                .replace('document.cookie.split', '"".split')
                .replace('document.cookie', 'window._document_cookie'))

    def css_node_filter(node):
        return "stylesheet" in node["rel"]

    def css_content_middleware(content, url, **kwargs):
        if css_middleware:
            content = css_middleware(content, url, **kwargs)
        file_dir = os.path.dirname(urlparse(url).path)

        # Download linked fonts and images
        def repl(match):
            src = match.group(1)
            if src.startswith('//localhost'):
                return 'url()'
            # Don't download data: files
            if src.startswith('data:'):
                return match.group(0)
            src_url = urljoin(base_url, os.path.join(file_dir, src))
            if _is_blacklisted(src_url, url_blacklist):
                print('    Skipping downloading blacklisted url', src_url)
                return 'url()'
            derived_filename = derive_filename(src_url)
            download_file(src_url, destination, request_fn=request_fn,
                          filename=derived_filename)
            return 'url("%s")' % derived_filename

        return _CSS_URL_RE.sub(repl, content)

    # Download all linked static assets.
download_assets("img[src]", "src") # Images download_assets("link[href]", "href", content_middleware=css_content_middleware, node_filter=css_node_filter) # CSS download_assets("script[src]", "src", content_middleware=js_content_middleware) # JS download_assets("source[src]", "src") # Potentially audio download_assets("source[srcset]", "srcset") # Potentially audio # ... and also run the middleware on CSS/JS embedded in the page source to # get linked files. for node in doc.select('style'): node.string = css_content_middleware(node.get_text(), url='') for node in doc.select('script'): if not node.attrs.get('src'): node.string = js_content_middleware(node.get_text(), url='') return doc
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.

Args:
    doc: The HTML page source as a string or BeautifulSoup instance.
    destination: The folder to download the static assets to!
    base_url: The base URL where assets will be downloaded from.
    request_fn: The function to be called to make requests, passed to
        ricecooker.utils.html.download_file(). Pass in a custom one for
        custom caching logic.
    url_blacklist: A list of keywords of files to not include in
        downloading. Will do substring matching, so e.g. 'acorn.js' will
        match '/some/path/to/acorn.js'.
    js_middleware: If specified, JS content will be passed into this
        callback which is expected to return JS content with any
        modifications.
    css_middleware: If specified, CSS content will be passed into this
        callback which is expected to return CSS content with any
        modifications.

Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
Below is the the instruction that describes the task:
### Input:
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.

Args:
    doc: The HTML page source as a string or BeautifulSoup instance.
    destination: The folder to download the static assets to!
    base_url: The base URL where assets will be downloaded from.
    request_fn: The function to be called to make requests, passed to
        ricecooker.utils.html.download_file(). Pass in a custom one for
        custom caching logic.
    url_blacklist: A list of keywords of files to not include in
        downloading. Will do substring matching, so e.g. 'acorn.js' will
        match '/some/path/to/acorn.js'.
    js_middleware: If specified, JS content will be passed into this
        callback which is expected to return JS content with any
        modifications.
    css_middleware: If specified, CSS content will be passed into this
        callback which is expected to return CSS content with any
        modifications.

Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
### Response:
def download_static_assets(doc, destination, base_url,
                           request_fn=make_request, url_blacklist=[],
                           js_middleware=None, css_middleware=None,
                           derive_filename=_derive_filename):
    """
    Download all static assets referenced from an HTML page.
    The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
    audio clips.

    Args:
        doc: The HTML page source as a string or BeautifulSoup instance.
        destination: The folder to download the static assets to!
        base_url: The base URL where assets will be downloaded from.
        request_fn: The function to be called to make requests, passed to
            ricecooker.utils.html.download_file(). Pass in a custom one for
            custom caching logic.
        url_blacklist: A list of keywords of files to not include in
            downloading. Will do substring matching, so e.g. 'acorn.js' will
            match '/some/path/to/acorn.js'.
        js_middleware: If specified, JS content will be passed into this
            callback which is expected to return JS content with any
            modifications.
        css_middleware: If specified, CSS content will be passed into this
            callback which is expected to return CSS content with any
            modifications.

    Return the modified page HTML with links rewritten to the locations of the
    downloaded static files, as a BeautifulSoup object. (Call str() on it to
    extract the raw HTML.)
    """
    if not isinstance(doc, BeautifulSoup):
        doc = BeautifulSoup(doc, "html.parser")

    # Helper function to download all assets for a given CSS selector.
def download_assets(selector, attr, url_middleware=None, content_middleware=None, node_filter=None): nodes = doc.select(selector) for i, node in enumerate(nodes): if node_filter: if not node_filter(node): src = node[attr] node[attr] = '' print(' Skipping node with src ', src) continue if node[attr].startswith('data:'): continue url = urljoin(base_url, node[attr]) if _is_blacklisted(url, url_blacklist): print(' Skipping downloading blacklisted url', url) node[attr] = "" continue if url_middleware: url = url_middleware(url) filename = derive_filename(url) node[attr] = filename print(" Downloading", url, "to filename", filename) download_file(url, destination, request_fn=request_fn, filename=filename, middleware_callbacks=content_middleware) def js_content_middleware(content, url, **kwargs): if js_middleware: content = js_middleware(content, url, **kwargs) # Polyfill localStorage and document.cookie as iframes can't access # them return (content .replace("localStorage", "_localStorage") .replace('document.cookie.split', '"".split') .replace('document.cookie', 'window._document_cookie')) def css_node_filter(node): return "stylesheet" in node["rel"] def css_content_middleware(content, url, **kwargs): if css_middleware: content = css_middleware(content, url, **kwargs) file_dir = os.path.dirname(urlparse(url).path) # Download linked fonts and images def repl(match): src = match.group(1) if src.startswith('//localhost'): return 'url()' # Don't download data: files if src.startswith('data:'): return match.group(0) src_url = urljoin(base_url, os.path.join(file_dir, src)) if _is_blacklisted(src_url, url_blacklist): print(' Skipping downloading blacklisted url', src_url) return 'url()' derived_filename = derive_filename(src_url) download_file(src_url, destination, request_fn=request_fn, filename=derived_filename) return 'url("%s")' % derived_filename return _CSS_URL_RE.sub(repl, content) # Download all linked static assets. download_assets("img[src]", "src") # Images download_assets("link[href]", "href", content_middleware=css_content_middleware, node_filter=css_node_filter) # CSS download_assets("script[src]", "src", content_middleware=js_content_middleware) # JS download_assets("source[src]", "src") # Potentially audio download_assets("source[srcset]", "srcset") # Potentially audio # ... and also run the middleware on CSS/JS embedded in the page source to # get linked files. for node in doc.select('style'): node.string = css_content_middleware(node.get_text(), url='') for node in doc.select('script'): if not node.attrs.get('src'): node.string = js_content_middleware(node.get_text(), url='') return doc
def basic_network(cm=False): """A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|. """ tpm = np.array([ [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1], [1, 1, 1], [1, 1, 0] ]) if cm is False: cm = np.array([ [0, 0, 1], [1, 0, 1], [1, 1, 0] ]) else: cm = None return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|.
Below is the the instruction that describes the task: ### Input: A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|. ### Response: def basic_network(cm=False): """A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|. """ tpm = np.array([ [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1], [1, 1, 1], [1, 1, 0] ]) if cm is False: cm = np.array([ [0, 0, 1], [1, 0, 1], [1, 1, 0] ]) else: cm = None return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
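Editor's note: as a sanity check on the table, the TPM can be re-derived from the three gate definitions (A = OR(B, C), B = COPY(C), C = XOR(A, B)) with plain NumPy; the rows are generated with A varying fastest, matching the row order in the docstring.

import numpy as np
from itertools import product

# Next state of each node: A = OR(B, C), B = COPY(C), C = XOR(A, B).
rows = [[b | c, c, a ^ b] for c, b, a in product((0, 1), repeat=3)]
print(np.array(rows))  # identical to the hard-coded tpm, row for row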
def get_session(self): """Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials. """ response = _request_access_token( grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, scopes=self.scopes, ) oauth2credential = OAuth2Credential.make_from_response( response=response, grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, ) return Session(oauth2credential=oauth2credential)
Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials.
Below is the the instruction that describes the task: ### Input: Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials. ### Response: def get_session(self): """Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials. """ response = _request_access_token( grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, scopes=self.scopes, ) oauth2credential = OAuth2Credential.make_from_response( response=response, grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, ) return Session(oauth2credential=oauth2credential)
def one(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]):
    """
    Transform object by jq script, returning the first result.
    Raise ValueError unless the results include exactly one element.
    """
    return compile(script, vars, library_paths).one(_get_value(value, url, opener))
Transform object by jq script, returning the first result.
Raise ValueError unless the results include exactly one element.
Below is the the instruction that describes the task:
### Input:
Transform object by jq script, returning the first result.
Raise ValueError unless the results include exactly one element.
### Response:
def one(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]):
    """
    Transform object by jq script, returning the first result.
    Raise ValueError unless the results include exactly one element.
    """
    return compile(script, vars, library_paths).one(_get_value(value, url, opener))
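Editor's note: this matches the pyjq-style API, which the snippet appears to come from. Assuming pyjq is installed, `one` returns the single result or raises `ValueError` when the program yields zero or several values.

import pyjq

print(pyjq.one('.results[0].name', {'results': [{'name': 'ada'}]}))  # 'ada'

try:
    # Two results -> not exactly one element -> ValueError
    pyjq.one('.results[].name', {'results': [{'name': 'a'}, {'name': 'b'}]})
except ValueError as exc:
    print('rejected:', exc)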
def fill_nan(self, val: str, *cols):
    """
    Fill NaN values with new values in the main dataframe

    :param val: new value
    :type val: str
    :param \*cols: names of the columns
    :type \*cols: str, at least one

    :example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
    """
    df = self._fill_nan(val, *cols)
    if df is not None:
        self.df = df
    else:
        self.err("Can not fill nan values")
Fill NaN values with new values in the main dataframe

:param val: new value
:type val: str
:param \*cols: names of the columns
:type \*cols: str, at least one

:example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
Below is the the instruction that describes the task:
### Input:
Fill NaN values with new values in the main dataframe

:param val: new value
:type val: str
:param \*cols: names of the columns
:type \*cols: str, at least one

:example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
### Response:
def fill_nan(self, val: str, *cols):
    """
    Fill NaN values with new values in the main dataframe

    :param val: new value
    :type val: str
    :param \*cols: names of the columns
    :type \*cols: str, at least one

    :example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
    """
    df = self._fill_nan(val, *cols)
    if df is not None:
        self.df = df
    else:
        self.err("Can not fill nan values")
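Editor's note: the private `_fill_nan` helper isn't shown, but on a pandas-backed frame the core of it is a column-restricted `fillna`. A self-contained sketch of that assumption:

import pandas as pd

df = pd.DataFrame({'mycol1': ['a', None], 'mycol2': [None, 'b']})

cols = ['mycol1', 'mycol2']
df[cols] = df[cols].fillna('new value')
print(df)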
def on_post(self, req, resp): """ Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. """ grant_type = req.get_param('grant_type') password = req.get_param('password') username = req.get_param('username') # errors or not, disable client caching along the way # per the spec resp.disable_caching() if not grant_type or not password or not username: resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'invalid_request', 'error_description': 'A grant_type, username, & password ' 'parameters are all required when ' 'requesting an OAuth access_token', 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) elif grant_type != 'password': resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'unsupported_grant_type', 'error_description': 'The grant_type parameter MUST be set ' 'to "password" not "%s"' % grant_type, 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) else: try: token = self.auth_creds(username, password) resp.serialize({ 'access_token': token, 'token_type': 'Bearer', }) except AuthRejected as exc: resp.status = falcon.HTTP_401 resp.set_header('WWW-Authenticate', self._realm) resp.serialize({ 'error': 'invalid_client', 'error_description': exc.detail, })
Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder.
Below is the the instruction that describes the task: ### Input: Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. ### Response: def on_post(self, req, resp): """ Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. """ grant_type = req.get_param('grant_type') password = req.get_param('password') username = req.get_param('username') # errors or not, disable client caching along the way # per the spec resp.disable_caching() if not grant_type or not password or not username: resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'invalid_request', 'error_description': 'A grant_type, username, & password ' 'parameters are all required when ' 'requesting an OAuth access_token', 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) elif grant_type != 'password': resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'unsupported_grant_type', 'error_description': 'The grant_type parameter MUST be set ' 'to "password" not "%s"' % grant_type, 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) else: try: token = self.auth_creds(username, password) resp.serialize({ 'access_token': token, 'token_type': 'Bearer', }) except AuthRejected as exc: resp.status = falcon.HTTP_401 resp.set_header('WWW-Authenticate', self._realm) resp.serialize({ 'error': 'invalid_client', 'error_description': exc.detail, })
def extract_source_planes_strikes_dips(src):
    """
    Extract strike and dip angles for source defined by multiple planes.
    """
    if "characteristicFaultSource" not in src.tag:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
        return strikes, dips
    tags = get_taglist(src)
    surface_set = src.nodes[tags.index("surface")]
    strikes = []
    dips = []
    num_planes = 0
    for surface in surface_set:
        if "planarSurface" in surface.tag:
            strikes.append(float(surface.attrib["strike"]))
            dips.append(float(surface.attrib["dip"]))
            num_planes += 1
    if num_planes > MAX_PLANES:
        raise ValueError("Number of planes in source %s exceeds maximum "
                         "of %s" % (str(num_planes), str(MAX_PLANES)))
    if num_planes:
        strikes = expand_src_param(strikes, PLANES_STRIKES_PARAM)
        dips = expand_src_param(dips, PLANES_DIPS_PARAM)
    else:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
    return strikes, dips
Extract strike and dip angles for source defined by multiple planes.
Below is the the instruction that describes the task:
### Input:
Extract strike and dip angles for source defined by multiple planes.
### Response:
def extract_source_planes_strikes_dips(src):
    """
    Extract strike and dip angles for source defined by multiple planes.
    """
    if "characteristicFaultSource" not in src.tag:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
        return strikes, dips
    tags = get_taglist(src)
    surface_set = src.nodes[tags.index("surface")]
    strikes = []
    dips = []
    num_planes = 0
    for surface in surface_set:
        if "planarSurface" in surface.tag:
            strikes.append(float(surface.attrib["strike"]))
            dips.append(float(surface.attrib["dip"]))
            num_planes += 1
    if num_planes > MAX_PLANES:
        raise ValueError("Number of planes in source %s exceeds maximum "
                         "of %s" % (str(num_planes), str(MAX_PLANES)))
    if num_planes:
        strikes = expand_src_param(strikes, PLANES_STRIKES_PARAM)
        dips = expand_src_param(dips, PLANES_DIPS_PARAM)
    else:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
    return strikes, dips
def get_lookup(self, operator): """Look up a lookup. :param operator: Name of the lookup operator """ try: return self._lookups[operator] except KeyError: raise NotImplementedError("Lookup operator '{}' is not supported".format(operator))
Look up a lookup. :param operator: Name of the lookup operator
Below is the the instruction that describes the task: ### Input: Look up a lookup. :param operator: Name of the lookup operator ### Response: def get_lookup(self, operator): """Look up a lookup. :param operator: Name of the lookup operator """ try: return self._lookups[operator] except KeyError: raise NotImplementedError("Lookup operator '{}' is not supported".format(operator))
def xzhdr(self, header, msgid_range=None):
    """XZHDR command.

    Args:
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple specifying a range of article numbers in
            the form (first, [last]) - if last is omitted then all articles
            after first are included. A msgid_range of None (the default)
            uses the current article.
    """
    args = header
    if msgid_range is not None:
        args += " " + utils.unparse_msgid_range(msgid_range)

    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)

    return self.info(code, message, compressed=True)
XZHDR command.

Args:
    msgid_range: A message-id as a string, or an article number as an
        integer, or a tuple specifying a range of article numbers in
        the form (first, [last]) - if last is omitted then all articles
        after first are included. A msgid_range of None (the default)
        uses the current article.
Below is the the instruction that describes the task:
### Input:
XZHDR command.

Args:
    msgid_range: A message-id as a string, or an article number as an
        integer, or a tuple specifying a range of article numbers in
        the form (first, [last]) - if last is omitted then all articles
        after first are included. A msgid_range of None (the default)
        uses the current article.
### Response:
def xzhdr(self, header, msgid_range=None):
    """XZHDR command.

    Args:
        msgid_range: A message-id as a string, or an article number as an
            integer, or a tuple specifying a range of article numbers in
            the form (first, [last]) - if last is omitted then all articles
            after first are included. A msgid_range of None (the default)
            uses the current article.
    """
    args = header
    if msgid_range is not None:
        args += " " + utils.unparse_msgid_range(msgid_range)

    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)

    return self.info(code, message, compressed=True)
def get(cls, pid, session): """Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection """ with cls._lock: cls._ensure_pool_exists(pid) return cls._pools[pid].get(session)
Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection
Below is the the instruction that describes the task: ### Input: Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection ### Response: def get(cls, pid, session): """Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection """ with cls._lock: cls._ensure_pool_exists(pid) return cls._pools[pid].get(session)
def _add_temporary_results(self, results, label): """Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str` """ NGRAM, SIZE, NAME, SIGLUM, COUNT, LABEL = constants.QUERY_FIELDNAMES reader = csv.DictReader(results) data = [(row[NGRAM], row[SIZE], row[NAME], row[SIGLUM], row[COUNT], label) for row in reader] self._conn.executemany(constants.INSERT_TEMPORARY_RESULTS_SQL, data)
Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str`
Below is the the instruction that describes the task: ### Input: Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str` ### Response: def _add_temporary_results(self, results, label): """Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str` """ NGRAM, SIZE, NAME, SIGLUM, COUNT, LABEL = constants.QUERY_FIELDNAMES reader = csv.DictReader(results) data = [(row[NGRAM], row[SIZE], row[NAME], row[SIGLUM], row[COUNT], label) for row in reader] self._conn.executemany(constants.INSERT_TEMPORARY_RESULTS_SQL, data)
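Editor's note: the interesting move here is streaming a csv.DictReader straight into executemany as a list of tuples. A self-contained sqlite3 sketch of the same pattern — the table name and column names are invented, since the real DDL lives in the module's constants.

import csv
import io
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TEMPORARY TABLE temp_results '
             '(ngram TEXT, size INTEGER, name TEXT, siglum TEXT, '
             'count INTEGER, label TEXT)')

results = io.StringIO('ngram,size,name,siglum,count\nthe,1,doc1,a,42\n')
label = 'control'

reader = csv.DictReader(results)
data = [(row['ngram'], row['size'], row['name'], row['siglum'],
         row['count'], label) for row in reader]
conn.executemany('INSERT INTO temp_results VALUES (?, ?, ?, ?, ?, ?)', data)
print(conn.execute('SELECT * FROM temp_results').fetchall())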
def tupletree(table, start='start', stop='stop', value=None): """ Construct an interval tree for the given table, where each node in the tree is a row of the table. """ import intervaltree tree = intervaltree.IntervalTree() it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) assert start in flds, 'start field not recognised' assert stop in flds, 'stop field not recognised' getstart = itemgetter(flds.index(start)) getstop = itemgetter(flds.index(stop)) if value is None: getvalue = tuple else: valueindices = asindices(hdr, value) assert len(valueindices) > 0, 'invalid value field specification' getvalue = itemgetter(*valueindices) for row in it: tree.addi(getstart(row), getstop(row), getvalue(row)) return tree
Construct an interval tree for the given table, where each node in the tree is a row of the table.
Below is the the instruction that describes the task: ### Input: Construct an interval tree for the given table, where each node in the tree is a row of the table. ### Response: def tupletree(table, start='start', stop='stop', value=None): """ Construct an interval tree for the given table, where each node in the tree is a row of the table. """ import intervaltree tree = intervaltree.IntervalTree() it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) assert start in flds, 'start field not recognised' assert stop in flds, 'stop field not recognised' getstart = itemgetter(flds.index(start)) getstop = itemgetter(flds.index(stop)) if value is None: getvalue = tuple else: valueindices = asindices(hdr, value) assert len(valueindices) > 0, 'invalid value field specification' getvalue = itemgetter(*valueindices) for row in it: tree.addi(getstart(row), getstop(row), getvalue(row)) return tree
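Editor's note: assuming the function above is importable, a small usage sketch. A petl-style table (header row first) is indexed, then queried by point; `tree[175]` returns the set of `intervaltree.Interval` objects overlapping that position.

table = [('start', 'stop', 'gene'),
         (100, 200, 'abc'),
         (150, 250, 'xyz')]

tree = tupletree(table, value='gene')

# Point query: every interval overlapping position 175.
for iv in sorted(tree[175]):
    print(iv.begin, iv.end, iv.data)
# 100 200 abc
# 150 250 xyz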
def register(self, f, *args, **kwargs): """ Register a function and arguments to be called later. """ self._functions.append(lambda: f(*args, **kwargs))
Register a function and arguments to be called later.
Below is the instruction that describes the task: ### Input: Register a function and arguments to be called later. ### Response: def register(self, f, *args, **kwargs): """ Register a function and arguments to be called later. """ self._functions.append(lambda: f(*args, **kwargs))
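A quick sketch of the deferred-call pattern this enables; the record above only shows the registration side, so the host class and the run_all trigger below are hypothetical:

class Deferred:
    def __init__(self):
        self._functions = []

    def register(self, f, *args, **kwargs):
        # Freeze the call as a zero-argument closure for later execution.
        self._functions.append(lambda: f(*args, **kwargs))

    def run_all(self):  # hypothetical trigger
        for fn in self._functions:
            fn()

d = Deferred()
d.register(print, 'hello', 'world', sep=', ')
d.run_all()  # prints: hello, world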
def get_list_key(self, data, key, header_lines=2): """Get the list of a key's elements. Each element is a tuple (key=None, description, type=None). Note that the tuple's elements can differ depending on the key. :param data: the data to process :param key: the key """ return super(NumpydocTools, self).get_list_key(data, key, header_lines=header_lines)
Get the list of a key's elements. Each element is a tuple (key=None, description, type=None). Note that the tuple's elements can differ depending on the key. :param data: the data to process :param key: the key
Below is the instruction that describes the task: ### Input: Get the list of a key's elements. Each element is a tuple (key=None, description, type=None). Note that the tuple's elements can differ depending on the key. :param data: the data to process :param key: the key ### Response: def get_list_key(self, data, key, header_lines=2): """Get the list of a key's elements. Each element is a tuple (key=None, description, type=None). Note that the tuple's elements can differ depending on the key. :param data: the data to process :param key: the key """ return super(NumpydocTools, self).get_list_key(data, key, header_lines=header_lines)
def clone(self, project, folder="/", **kwargs): ''' :param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project. ''' if self._proj is None: raise DXError("Clone called when a project ID was not associated with this object handler") dxpy.api.project_clone(self._proj, {"objects": [self._dxid], "project": project, "destination": folder}, **kwargs) cloned_copy = copy.copy(self) cloned_copy.set_ids(cloned_copy.get_id(), project) return cloned_copy
:param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project.
Below is the instruction that describes the task: ### Input: :param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project. ### Response: def clone(self, project, folder="/", **kwargs): ''' :param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project. ''' if self._proj is None: raise DXError("Clone called when a project ID was not associated with this object handler") dxpy.api.project_clone(self._proj, {"objects": [self._dxid], "project": project, "destination": folder}, **kwargs) cloned_copy = copy.copy(self) cloned_copy.set_ids(cloned_copy.get_id(), project) return cloned_copy
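A hedged usage sketch against the DNAnexus API; the IDs below are placeholders and a logged-in dxpy session is assumed:

import dxpy

dxfile = dxpy.DXFile('file-xxxx', project='project-aaaa')  # placeholder IDs
cloned = dxfile.clone('project-bbbb', folder='/copies')
# The handler returned is bound to the destination project but keeps the same object ID.
print(cloned.get_id())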
def update_trigger(self, service): """ Update the date when the trigger occurs. :param service: service object to update """ now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ') TriggerService.objects.filter(id=service.id).update(date_triggered=now, consumer_failed=0, provider_failed=0, )
Update the date when the trigger occurs. :param service: service object to update
Below is the instruction that describes the task: ### Input: Update the date when the trigger occurs. :param service: service object to update ### Response: def update_trigger(self, service): """ Update the date when the trigger occurs. :param service: service object to update """ now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ') TriggerService.objects.filter(id=service.id).update(date_triggered=now, consumer_failed=0, provider_failed=0, )
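The timestamp side of this in isolation, assuming the arrow package; 'Europe/Paris' stands in for settings.TIME_ZONE:

import arrow

now = arrow.utcnow().to('Europe/Paris').format('YYYY-MM-DD HH:mm:ssZZ')
print(now)  # e.g. '2019-05-01 14:03:22+02:00' -- the string written to date_triggered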
def get_reserved_vlan_range(self, id_or_uri): """ Gets the reserved VLAN ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool """ uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range" return self._client.get(uri)
Gets the reserved VLAN ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool
Below is the instruction that describes the task: ### Input: Gets the reserved VLAN ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool ### Response: def get_reserved_vlan_range(self, id_or_uri): """ Gets the reserved VLAN ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool """ uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range" return self._client.get(uri)
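A hedged usage sketch; the client construction below follows the hpOneView SDK's usual config pattern, but the connection details and fabric URI are placeholders:

from hpOneView.oneview_client import OneViewClient

config = {'ip': 'oneview.example.com',
          'credentials': {'userName': 'administrator', 'password': 'secret'}}
oneview_client = OneViewClient(config)
vlan_pool = oneview_client.fabrics.get_reserved_vlan_range('/rest/fabrics/1234')  # placeholder URI
print(vlan_pool)  # dict describing the vlan-pool (HPE Synergy only)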
def assemble_content_aad(message_id, aad_content_string, seq_num, length): """Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known """ if not isinstance(aad_content_string, aws_encryption_sdk.identifiers.ContentAADString): raise SerializationError("Unknown aad_content_string") fmt = ">16s{}sIQ".format(len(aad_content_string.value)) return struct.pack(fmt, message_id, aad_content_string.value, seq_num, length)
Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known
Below is the instruction that describes the task: ### Input: Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known ### Response: def assemble_content_aad(message_id, aad_content_string, seq_num, length): """Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known """ if not isinstance(aad_content_string, aws_encryption_sdk.identifiers.ContentAADString): raise SerializationError("Unknown aad_content_string") fmt = ">16s{}sIQ".format(len(aad_content_string.value)) return struct.pack(fmt, message_id, aad_content_string.value, seq_num, length)
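The packing logic reproduced with only the standard library so the byte layout is visible; the AAD string below is assumed to match one of the ContentAADString member values (the frame-mode string), and the message ID is a dummy 16-byte value:

import struct

message_id = b'\x00' * 16                    # dummy 16-byte message ID
aad_value = b'AWSKMSEncryptionClient Frame'  # assumed ContentAADString value for frame mode
seq_num, length = 1, 1024
fmt = '>16s{}sIQ'.format(len(aad_value))     # 16-byte ID, AAD string, uint32 seq, uint64 length
body_aad = struct.pack(fmt, message_id, aad_value, seq_num, length)
print(len(body_aad))  # 16 + 28 + 4 + 8 = 56 bytes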
def pickle_loads(cls, s): """Reconstruct the flow from a string.""" strio = StringIO() strio.write(s) strio.seek(0) flow = pmg_pickle_load(strio) return flow
Reconstruct the flow from a string.
Below is the instruction that describes the task: ### Input: Reconstruct the flow from a string. ### Response: def pickle_loads(cls, s): """Reconstruct the flow from a string.""" strio = StringIO() strio.write(s) strio.seek(0) flow = pmg_pickle_load(strio) return flow
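A round-trip sketch of the same idea with the standard library; note that under Python 3 pickled data is bytes, so BytesIO is the natural buffer here -- the StringIO in the record above reflects Python 2-era usage:

import pickle
from io import BytesIO

flow = {'tasks': ['scf', 'nscf']}  # stand-in for a Flow object
s = pickle.dumps(flow)             # serialize to a bytes string

buf = BytesIO()
buf.write(s)
buf.seek(0)
restored = pickle.load(buf)        # reconstruct the object from the buffer
assert restored == flow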