positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def console_set_char(
    con: tcod.console.Console, x: int, y: int, c: Union[int, str]
) -> None:
    """Change the character at x,y to c, keeping the current colors.

    Args:
        con (Console): Any Console instance.
        x (int): Character x position from the left.
        y (int): Character y position from the top.
        c (Union[int, AnyStr]): Character to draw, can be an integer or string.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this function.
        See :any:`Console.ch`.
    """
    # NOTE(review): presumably _int coerces a one-character string to its
    # ordinal and _console unwraps the Console's C pointer -- confirm against
    # the module's helper definitions.
    lib.TCOD_console_set_char(_console(con), x, y, _int(c))
Change the character at x,y to c, keeping the current colors. Args: con (Console): Any Console instance. x (int): Character x position from the left. y (int): Character y position from the top. c (Union[int, AnyStr]): Character to draw, can be an integer or string. .. deprecated:: 8.4 Array access performs significantly faster than using this function. See :any:`Console.ch`.
def get_sentences(self, root_element, block_tags):
    """Return a list of plain-text sentences by recursively walking XML tags,
    skipping any tag whose name matches an entry in ``block_tags``.

    Args:
        root_element: ElementTree element whose subtree is scanned.
        block_tags: Tag-name suffixes to exclude (checked via
            ``self.any_ends_with``).

    Returns:
        list[str]: Sentences tokenized from the text of all non-blocked tags.
    """
    sentences = []
    for element in root_element:
        if not self.any_ends_with(block_tags, element.tag):  # tag not in block_tags
            # Raw string avoids an invalid-escape warning for \s.
            if element.text is not None and not re.match(r'^\s*$', element.text):
                sentences.extend(self.sentence_tokenize(element.text))
            sentences.extend(self.get_sentences(element, block_tags))
    # Debug dump; a context manager guarantees the handle is closed even if a
    # write fails.  NOTE(review): this file is rewritten at every recursion
    # level, so only the outermost call's output survives -- preserved from
    # the original behavior.
    with open('sentence_debug.txt', 'w') as debug_file:
        for s in sentences:
            debug_file.write(s.lower() + '\n')
    return sentences
Returns a list of plain-text sentences by iterating through XML tags except for those listed in block_tags.
def full(self):
    """Return True if there are maxsize items in the queue.

    Note: if the Queue was initialized with maxsize=0 (the default),
    then full() is never True.
    """
    maxsize = self._parent._maxsize
    # A non-positive maxsize means "unbounded", so the queue can never fill.
    if maxsize <= 0:
        return False
    return self.qsize() >= maxsize
Return True if there are maxsize items in the queue. Note: if the Queue was initialized with maxsize=0 (the default), then full() is never True.
def partial_trace(self, qubits: Qubits) -> 'QubitVector':
    """ Return the partial trace over some subset of qubits"""
    N = self.qubit_nb
    R = self.rank
    # Rank 1 is a plain state vector: it has no paired bra/ket indices to
    # contract, so a trace is undefined.
    if R == 1:
        raise ValueError('Cannot take trace of vector')
    # Remaining qubits after tracing out `qubits`; tracing everything away
    # would leave a scalar, which QubitVector cannot represent.
    new_qubits: List[Qubit] = list(self.qubits)
    for q in qubits:
        new_qubits.remove(q)
    if not new_qubits:
        raise ValueError('Cannot remove all qubits with partial_trace.')
    # Positions (within one rank "slot") of the qubits being traced out.
    indices = [self.qubits.index(qubit) for qubit in qubits]
    # Build an einsum subscript string with N*R distinct labels, then make
    # the label of each traced qubit identical across all R ranks so that
    # einsum contracts (sums) over those axes.
    subscripts = list(EINSUM_SUBSCRIPTS)[0:N*R]
    for idx in indices:
        for r in range(1, R):
            subscripts[r * N + idx] = subscripts[idx]
    subscript_str = ''.join(subscripts)
    # Only numpy's einsum works with repeated subscripts
    tensor = self.asarray()
    tensor = np.einsum(subscript_str, tensor)
    return QubitVector(tensor, new_qubits)
Return the partial trace over some subset of qubits
def message(self, executor_id, slave_id, message):
    """Sends a message from the framework to one of its executors.

    These messages are best effort; do not expect a framework message
    to be retransmitted in any reliable fashion.
    """
    logging.info(
        'Sends message `{}` to executor `{}` on slave `{}`'.format(
            message, executor_id, slave_id))
    # Both identifiers must be encoded before handing them to the driver.
    encoded_executor = encode(executor_id)
    encoded_slave = encode(slave_id)
    return self.driver.sendFrameworkMessage(encoded_executor,
                                            encoded_slave,
                                            message)
Sends a message from the framework to one of its executors. These messages are best effort; do not expect a framework message to be retransmitted in any reliable fashion.
def get_range_around(range_value, current_item, padding):
    """
    Returns a range of numbers around the given number.

    This is useful for pagination, where you might want to show something
    like this::

        << < ... 4 5 (6) 7 8 ... > >>

    In this example `6` would be the current page and we show 2 items around
    that page (including the page itself).

    Usage::

        {% load libs_tags %}
        {% get_range_around page_obj.paginator.num_pages page_obj.number 5
          as pages %}

    :param range_value: Number of total items in your range (1 indexed).
      NOTE(review): the original docstring called this ``range_amount``,
      which does not match the signature.
    :param current_item: The item around which the result should be
      centered (1 indexed)
    :param padding: Number of items to show left and right from the
      current item.
    :returns: Dict with ``range_items`` (a ``range``) plus ``left_padding``
      and ``right_padding`` booleans indicating truncation on either side.
    """
    total_items = 1 + padding * 2
    left_bound = padding
    right_bound = range_value - padding

    # Everything fits: show the full range, no ellipses needed.
    if range_value <= total_items:
        range_items = range(1, range_value + 1)
        return {
            'range_items': range_items,
            'left_padding': False,
            'right_padding': False,
        }

    # Current item is near the left edge: pin the window to the start.
    if current_item <= left_bound:
        range_items = range(1, range_value + 1)[:total_items]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }

    # Current item is near the right edge: pin the window to the end.
    if current_item >= right_bound:
        range_items = range(1, range_value + 1)[-total_items:]
        return {
            'range_items': range_items,
            'left_padding': range_items[0] > 1,
            'right_padding': range_items[-1] < range_value,
        }

    # General case: center the window on the current item.
    range_items = range(current_item - padding, current_item + padding + 1)
    return {
        'range_items': range_items,
        'left_padding': True,
        'right_padding': True,
    }
Returns a range of numbers around the given number. This is useful for pagination, where you might want to show something like this:: << < ... 4 5 (6) 7 8 .. > >> In this example `6` would be the current page and we show 2 items around that page (including the page itself). Usage:: {% load libs_tags %} {% get_range_around page_obj.paginator.num_pages page_obj.number 5 as pages %} :param range_amount: Number of total items in your range (1 indexed) :param current_item: The item around which the result should be centered (1 indexed) :param padding: Number of items to show left and right from the current item.
def stream_json_file(local_file):
    """Stream a JSON file (in JSON-per-line format)

    Args:
        local_file (file-like object): an open file-handle that contains a
            JSON string on each line

    Yields:
        (dict) JSON objects
    """
    for line_no, raw_line in enumerate(local_file):
        try:
            parsed = json.loads(raw_line.decode('utf-8'))
        except ValueError as err:
            # Malformed lines are skipped rather than aborting the stream.
            logging.warning("Skipping line %d due to error: %s", line_no, err)
            continue
        yield parsed
Stream a JSON file (in JSON-per-line format) Args: local_file (file-like object) an open file-handle that contains a JSON string on each line Yields: (dict) JSON objects
def get(self, *args, **kwargs):
    """Perform a get request."""
    # Pop the 'convert' flag (defaults to True) before keyword processing.
    conversion = kwargs.pop('convert', True)
    kwargs = self._get_keywords(**kwargs)
    url = self._create_path(*args)
    request = self.session.get(url, params=kwargs)
    content = request.content
    self._request = request
    return self.convert(content, conversion)
Perform a get request.
def make_session(username=None, password=None, bearer_token=None,
                 extra_headers_dict=None):
    """Creates a Requests Session for use. Accepts a bearer token
    for premium users and will override username and password information if
    present.

    Args:
        username (str): username for the session
        password (str): password for the user
        bearer_token (str): token for a premium API user.
        extra_headers_dict (dict): additional headers merged into the
            session headers.

    Raises:
        KeyError: if neither a password nor a bearer token is provided.
    """
    if password is None and bearer_token is None:
        logger.error("No authentication information provided; "
                     "please check your object")
        raise KeyError("no authentication information provided")

    session = requests.Session()
    session.trust_env = False
    headers = {'Accept-encoding': 'gzip',
               'User-Agent': 'twitterdev-search-tweets-python/' + VERSION}
    if bearer_token:
        logger.info("using bearer token for authentication")
        headers['Authorization'] = "Bearer {}".format(bearer_token)
    else:
        logger.info("using username and password for authentication")
        session.auth = username, password
    # Merge extra headers BEFORE attaching them to the session so the intent
    # is explicit.  (The original mutated the dict after assigning it, which
    # only worked because the session held a reference to the same object.)
    if extra_headers_dict:
        headers.update(extra_headers_dict)
    session.headers = headers
    return session
Creates a Requests Session for use. Accepts a bearer token for premium users and will override username and password information if present. Args: username (str): username for the session password (str): password for the user bearer_token (str): token for a premium API user.
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
    """
    Searches for a series in TheTVDB by either its name, imdb_id or
    zap2it_id.

    :param name: the name of the series to look for
    :param imdb_id: the IMDB id of the series to look for
    :param zap2it_id: the zap2it id of the series to look for.
    :return: a python dictionary with either the result of the search or
        an error from TheTVDB.
    """
    # locals() must be captured before any other local is defined so it
    # contains exactly the call arguments.
    arguments = locals()
    optional_parameters = {
        'name': 'name',
        'imdb_id': 'imdbId',
        'zap2it_id': 'zap2itId',
    }
    query_string = utils.query_param_string_from_option_args(
        optional_parameters, arguments)
    url = '%s%s?%s' % (self.API_BASE_URL, '/search/series', query_string)
    raw_response = requests_util.run_request(
        'get', url, headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id. :param name: the name of the series to look for :param imdb_id: the IMDB id of the series to look for :param zap2it_id: the zap2it id of the series to look for. :return: a python dictionary with either the result of the search or an error from TheTVDB.
def frames(self, flush=True):
    """Returns the latest color image from the stream.

    Args:
        flush (bool): If True (default), flush buffered frames first so the
            returned frame is the most recent one.  NOTE(review): the
            original implementation accepted this parameter but always
            flushed unconditionally; it is now honored.

    Raises:
        Exception: if the OpenCV sensor gives a ret_val of 0.
    """
    if flush:
        self.flush()
    ret_val, frame = self._sensor.read()
    if not ret_val:
        raise Exception(
            "Unable to retrieve frame from OpenCVCameraSensor for id {0}".format(
                self._device_id))
    # OpenCV reads BGR; convert to the RGB ordering ColorImage expects.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if self._upside_down:
        # Rotate 180 degrees: flip vertically then horizontally.
        frame = np.flipud(frame).astype(np.uint8)
        frame = np.fliplr(frame).astype(np.uint8)
    return ColorImage(frame)
Returns the latest color image from the stream Raises: Exception if opencv sensor gives ret_val of 0
def unbind(self):
    """
    Unbinds this connection from queue and topic managers (freeing up
    resources) and resets state.
    """
    # Mark disconnected first, then release both manager registrations.
    self.connected = False
    for manager in (self.queue_manager, self.topic_manager):
        manager.disconnect(self.connection)
Unbinds this connection from queue and topic managers (freeing up resources) and resets state.
def get_fipscode(self, obj):
    """County FIPS code, or None when the division is not county-level."""
    division = obj.division
    if division.level.name != DivisionLevel.COUNTY:
        return None
    return division.code
County FIPS code
def drop_vocab(self, vocab_name, **kwargs):
    """Removes the vocab from the definition triplestore.

    args:
        vocab_name: the name or uri of the vocab to remove
    """
    vocab = self.__get_vocab_dict__(vocab_name, **kwargs)
    return self.drop_file(vocab['filename'], **kwargs)
Removes the vocab from the definition triplestore args: vocab_name: the name or uri of the vocab to return
def delete_external_feed_courses(self, course_id, external_feed_id):
    """
    Delete an external feed.

    Deletes the external feed.
    """
    # Both parameters are REQUIRED path components; this endpoint takes no
    # query or form parameters.
    path = {"course_id": course_id, "external_feed_id": external_feed_id}
    data = {}
    params = {}

    self.logger.debug(
        "DELETE /api/v1/courses/{course_id}/external_feeds/{external_feed_id}"
        " with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "DELETE",
        "/api/v1/courses/{course_id}/external_feeds/{external_feed_id}".format(**path),
        data=data, params=params, single_item=True)
Delete an external feed. Deletes the external feed.
def find(self, id):
    """Get a resource by its id

    Args:
        id (string): Resource id

    Returns:
        object: Instance of the resource type
    """
    resource_name = self.type.RESOURCE
    url = "{}/{}/{}".format(__endpoint__, resource_name, id)
    # The response payload is keyed by the singular resource name
    # (RESOURCE minus its trailing character).
    response = RestClient.get(url)[resource_name[:-1]]
    return self.type(response)
Get a resource by its id Args: id (string): Resource id Returns: object: Instance of the resource type
def scan(self, stop_on_first=True, base_ip=0):
    """Scans the local network for TVs."""
    tvs = []

    if base_ip == 0:
        # No base given: derive it from this machine's outbound IP by
        # opening a UDP socket toward a public address (no packets are
        # actually sent for a UDP connect).
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect(("8.8.8.8", 80))
        ip = sock.getsockname()[0]
        sock.close()
        # Keep the first three octets, e.g. 192.168.1
        base_ip = '.'.join(ip.split('.')[:3])

    # Probe host addresses 2..255 in the /24.
    for suffix in range(2, 256):
        candidate = '{}.{}'.format(base_ip, suffix)
        if self.check_ip(candidate):
            tvs.append(candidate)
            if stop_on_first:
                break

    return tvs
Scans the local network for TVs.
def get_neuroglancer_link(self, resource, resolution, x_range, y_range,
                          z_range, url_prefix, **kwargs):
    """
    Get a neuroglancer link of the cutout specified from the host specified
    in the remote configuration step.

    Args:
        resource (intern.resource.Resource): Resource compatible with
            cutout operations.
        resolution (int): 0 indicates native resolution.
        x_range (list[int]): x range such as [10, 20] meaning x>=10, x<20.
        y_range (list[int]): y range such as [10, 20] meaning y>=10, y<20.
        z_range (list[int]): z range such as [10, 20] meaning z>=10, z<20.
        url_prefix (string): Protocol + host such as https://api.theboss.io

    Returns:
        (string): Return neuroglancer link.

    Raises:
        RuntimeError when given invalid resource.

        Other exceptions may be raised depending on the volume service's
        implementation.
    """
    # Assemble the pieces separately, then join them with the fragment
    # template.  NOTE(review): the link uses '_' where JSON would use ','
    # -- preserved verbatim from the original.
    source = "boss://{}/{}/{}/{}".format(
        url_prefix, resource.coll_name, resource.exp_name, resource.name)
    layer = "'{}':{{'type':'{}'_'source':'{}'}}".format(
        resource.name, resource.type, source)
    position = "[{}_{}_{}]".format(x_range[0], y_range[0], z_range[0])
    navigation = ("'navigation':{{'pose':{{'position':"
                  "{{'voxelCoordinates':{}}}}}}}".format(position))
    return ("https://neuroglancer.theboss.io/#!{{'layers':{{{}}}_{}}}"
            .format(layer, navigation))
Get a neuroglancer link of the cutout specified from the host specified in the remote configuration step. Args: resource (intern.resource.Resource): Resource compatible with cutout operations. resolution (int): 0 indicates native resolution. x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20. y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20. z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20. url_prefix (string): Protocol + host such as https://api.theboss.io Returns: (string): Return neuroglancer link. Raises: RuntimeError when given invalid resource. Other exceptions may be raised depending on the volume service's implementation.
def objectives(self, rank):
    """Returns objective values of models with specified rank."""
    # Validate the rank before indexing into the results mapping.
    self._check_rank(rank)
    ranked_results = self.results[rank]
    return [entry.obj for entry in ranked_results]
Returns objective values of models with specified rank.
def list_models(self, macaroons):
    """
    Get the logged in user's models from the JIMM controller.

    @param macaroons The discharged JIMM macaroons.
        NOTE(review): this argument is currently unused by the body.
    @return The json decoded list of environments.
    """
    url = "{}model".format(self.url)
    return make_request(url,
                        timeout=self.timeout,
                        client=self._client,
                        cookies=self.cookies)
Get the logged in user's models from the JIMM controller. @param macaroons The discharged JIMM macaroons. @return The json decoded list of environments.
def getDXGIOutputInfo(self):
    """
    [D3D10/11 Only]
    Returns the adapter index and output index that the user should pass into
    EnumAdapters and EnumOutputs to create the device and swap chain in DX10
    and DX11. If an error occurs both indices will be set to -1.

    NOTE(review): despite the docstring (copied from the C++ API), this
    binding only queries and returns the adapter index; no output index is
    requested or returned -- confirm against the installed openvr version.
    """
    fn = self.function_table.getDXGIOutputInfo
    # Out-parameter filled in by the native call.
    pnAdapterIndex = c_int32()
    fn(byref(pnAdapterIndex))
    return pnAdapterIndex.value
[D3D10/11 Only] Returns the adapter index and output index that the user should pass into EnumAdapters and EnumOutputs to create the device and swap chain in DX10 and DX11. If an error occurs both indices will be set to -1.
def fullname(self):
    """Returns the name of the ``Record`` class this ``Property`` is
    attached to, and attribute name it is attached as."""
    if not self.bound:
        # Unbound properties have no owning class; include the attribute
        # name when one is known.
        if self.name is not None:
            return "(unbound).%s" % self.name
        return "(unbound)"
    # self.class_() returning a falsy value means the owning class has
    # been garbage-collected (per the original placeholder string).
    owner = self.class_()
    classname = owner.__name__ if owner else "(GC'd class)"
    return "%s.%s" % (classname, self.name)
Returns the name of the ``Record`` class this ``Property`` is attached to, and attribute name it is attached as.
def write(self, pkt):
    """
    Writes a Packet or bytes to a pcap file.

    :param pkt: Packet(s) to write (one record for each Packet), or raw
                bytes to write (as one record).
    :type pkt: iterable[Packet], Packet or bytes
    """
    if isinstance(pkt, bytes):
        # Raw bytes become a single record.
        if not self.header_present:
            self._write_header(pkt)
        self._write_packet(pkt)
        return
    # pkt may be a single Packet or an iterable of them; the original
    # called pkt.__iter__() directly, which iter() reproduces.
    for packet in iter(pkt):
        if not self.header_present:
            self._write_header(packet)
        self._write_packet(packet)
Writes a Packet or bytes to a pcap file. :param pkt: Packet(s) to write (one record for each Packet), or raw bytes to write (as one record). :type pkt: iterable[Packet], Packet or bytes
def _single_qubit_accumulate_into_scratch(args: Dict[str, Any]):
    """Accumulates single qubit phase gates into the scratch shards.

    Args:
        args: Shard bookkeeping dict; reads 'indices' (target qubit index
            at position 0), 'shard_num', 'half_turns' (Z rotation amount),
            and 'num_shard_qubits'.
    """
    index = args['indices'][0]
    shard_num = args['shard_num']
    half_turns = args['half_turns']
    num_shard_qubits = args['num_shard_qubits']
    scratch = _scratch_shard(args)
    # ExpZ = exp(-i pi Z half_turns / 2).
    if index >= num_shard_qubits:
        # Acts on prefix qubits: the qubit's value is constant within this
        # shard, so the phase reduces to a single +/-1 sign taken from the
        # shard number's bits.
        sign = 1 - 2 * _kth_bit(shard_num, index - num_shard_qubits)
        scratch -= half_turns * sign
    else:
        # Acts on shard qubits: apply the +/-1 eigenvalue pattern of Z on
        # the target qubit elementwise across the shard.
        scratch -= half_turns * _pm_vects(args)[index]
Accumulates single qubit phase gates into the scratch shards.
def bool(cls, must=None, should=None, must_not=None,
         minimum_number_should_match=None, boost=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html

    A query that matches documents matching boolean combinations of other
    queries; maps to the Lucene BooleanQuery.  Clause types:

    'must' - the clause (query) must appear in matching documents.
    'should' - the clause should appear in the matching document.  A
        boolean query with no 'must' clauses requires one or more 'should'
        clauses to match; the minimum number can be set with
        'minimum_number_should_match'.
    'must_not' - the clause must not appear in the matching documents.
        A query cannot consist solely of 'must_not' clauses.
    'minimum_number_should_match' - minimum number of 'should' clauses
        that must match.
    'boost' - boost value.

    > term = ElasticQuery()
    > term.term(user='kimchy')
    > query = ElasticQuery()
    > query.bool(should=term)
    > query.query()
      { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
    '''
    instance = cls(bool={})
    # Only explicitly-provided clauses are copied into the query body.
    clauses = (('must', must),
               ('should', should),
               ('must_not', must_not),
               ('minimum_number_should_match', minimum_number_should_match),
               ('boost', boost))
    for key, value in clauses:
        if value is not None:
            instance['bool'][key] = value
    return instance
http://www.elasticsearch.org/guide/reference/query-dsl/bool-query.html A query that matches documents matching boolean combinations of other queris. The bool query maps to Lucene BooleanQuery. It is built using one of more boolean clauses, each clause with a typed occurrence. The occurrence types are: 'must' - The clause(query) must appear in matching documents. 'should' - The clause(query) should appear in the matching document. A boolean query with no 'must' clauses, one or more 'should' clauses must match a document. The minimum number of 'should' clauses to match can be set using 'minimum_number_should_match' parameter. 'must_not' - The clause(query) must not appear in the matching documents. Note that it is not possible to search on documents that only consists of a 'must_not' clause(s). 'minimum_number_should_match' - Minimum number of documents that should match 'boost' - boost value > term = ElasticQuery() > term.term(user='kimchy') > query = ElasticQuery() > query.bool(should=term) > query.query() { 'bool' : { 'should' : { 'term' : {'user':'kimchy'}}}}
def _queue(self, kwargs): """The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname""" if not 'hard resource_list' in kwargs: return 'all.q' d = dict([k.split('=') for k in kwargs['hard resource_list'].split(',')]) for k in d: if k[0] == 'q' and d[k] == 'TRUE': return k return 'all.q'
The hard resource_list comes like this: '<qname>=TRUE,mem=128M'. To process it we have to split it twice (',' and then on '='), create a dictionary and extract just the qname
def add_method(function, klass, name=None):
    '''Add an existing function to a class as a method.

    Note: Consider using the extend decorator as a more readable alternative
    to using this function directly.

    Args:
        function: The function to be added to the class klass.
        klass: The class to which the new method will be added.
        name: An optional name for the new method.  If omitted or None the
            original name of the function is used.

    Returns:
        The function argument unmodified.

    Raises:
        ValueError: If klass already has an attribute with the same name
            as the extension method.
    '''
    # Should we be using functools.update_wrapper in here?
    method_name = function_name(function) if name is None else name
    # Refuse to clobber anything the class already defines or inherits.
    if hasattr(klass, method_name):
        raise ValueError("Cannot replace existing attribute with method "
                         "'{name}'".format(name=method_name))
    setattr(klass, method_name, function)
    return function
Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method.
def _get_analysis_period_subset(self, a_per):
    """Return an analysis_period that is always a subset of the Data
    Collection's own analysis period (hours and days clamped as needed)."""
    # An annual collection covers everything, so any request is a subset.
    if self.header.analysis_period.is_annual:
        return a_per
    # Mutable copy of the requested period's constructor arguments.
    new_needed = False
    n_ap = [a_per.st_month, a_per.st_day, a_per.st_hour,
            a_per.end_month, a_per.end_day, a_per.end_hour,
            a_per.timestep, a_per.is_leap_year]
    # Clamp start/end hours to the collection's hour range.
    if a_per.st_hour < self.header.analysis_period.st_hour:
        n_ap[2] = self.header.analysis_period.st_hour
        new_needed = True
    if a_per.end_hour > self.header.analysis_period.end_hour:
        n_ap[5] = self.header.analysis_period.end_hour
        new_needed = True
    # Clamp start/end days (compared by day-of-year) to the collection's
    # span.
    if a_per.st_time.doy < self.header.analysis_period.st_time.doy:
        n_ap[0] = self.header.analysis_period.st_month
        n_ap[1] = self.header.analysis_period.st_day
        new_needed = True
    if a_per.end_time.doy > self.header.analysis_period.end_time.doy:
        n_ap[3] = self.header.analysis_period.end_month
        n_ap[4] = self.header.analysis_period.end_day
        new_needed = True
    # Reuse the caller's object when nothing was clamped.
    if new_needed is False:
        return a_per
    else:
        return AnalysisPeriod(*n_ap)
Return an analysis_period that is always a subset of the Data Collection
def _apply_scope(self, scope, builder): """ Apply a single scope on the given builder instance. :param scope: The scope to apply :type scope: callable or Scope :param builder: The builder to apply the scope to :type builder: Builder """ if callable(scope): scope(builder) elif isinstance(scope, Scope): scope.apply(builder, self.get_model())
Apply a single scope on the given builder instance. :param scope: The scope to apply :type scope: callable or Scope :param builder: The builder to apply the scope to :type builder: Builder
def zoneheight(idf, zonename, debug=False):
    """Return the height of the named zone.

    Uses the floor-to-roof distance when the zone has both floor and roof
    surfaces; otherwise falls back to the min-to-max vertex height.
    """
    zone = idf.getobject('ZONE', zonename)
    surfaces = idf.idfobjects['BuildingSurface:Detailed'.upper()]
    zone_surfaces = [s for s in surfaces if s.Zone_Name == zone.Name]
    surface_types = [s.Surface_Type.upper() for s in zone_surfaces]
    if 'FLOOR' in surface_types and 'ROOF' in surface_types:
        return zone_floor2roofheight(idf, zonename)
    return zone_height_min2max(idf, zonename)
zone height
def expand_paths(paths, marker='*'):
    """
    :param paths: A glob path pattern string or pathlib.Path object holding
        such path, or a list consisting of path strings or glob path
        pattern strings or pathlib.Path objects holding such ones, or file
        objects
    :param marker: Glob marker character or string, e.g. '*'
    :return: List of path strings
    """
    # A plain string containing the glob marker is expanded directly.
    if is_path(paths) and marker in paths:
        return sglob(paths)
    # A pathlib.Path containing the marker is expanded via its POSIX form.
    # TBD: Is it better to return [p :: pathlib.Path] instead?
    if is_path_obj(paths) and marker in paths.as_posix():
        return [normpath(p) for p in sglob(paths.as_posix())]
    # Anything else (lists, file objects, ...) goes through the generic
    # item-by-item expansion.
    return list(_expand_paths_itr(paths, marker=marker))
:param paths: A glob path pattern string or pathlib.Path object holding such path, or a list consists of path strings or glob path pattern strings or pathlib.Path object holding such ones, or file objects :param marker: Glob marker character or string, e.g. '*' :return: List of path strings >>> expand_paths([]) [] >>> expand_paths("/usr/lib/a/b.conf /etc/a/b.conf /run/a/b.conf".split()) ['/usr/lib/a/b.conf', '/etc/a/b.conf', '/run/a/b.conf'] >>> paths_s = os.path.join(os.path.dirname(__file__), "u*.py") >>> ref = sglob(paths_s) >>> assert expand_paths(paths_s) == ref >>> ref = ["/etc/a.conf"] + ref >>> assert expand_paths(["/etc/a.conf", paths_s]) == ref >>> strm = anyconfig.compat.StringIO() >>> assert expand_paths(["/etc/a.conf", strm]) == ["/etc/a.conf", strm]
def set_fluxinfo(self):
    """Uses list of known flux calibrators (with models in CASA) to find
    full name given in scan.

    Sets ``self.fluxname``, ``self.fluxname_full`` and ``self.band``
    (empty strings when no known calibrator is found).
    """
    knowncals = ['3C286', '3C48', '3C147', '3C138']

    # Find scans with knowncals in the name.
    sourcenames = [self.sources[source]['source'] for source in self.sources]
    calsources = [cal for src in sourcenames
                  for cal in knowncals if cal in src]
    calsources_full = [src for src in sourcenames
                       for cal in knowncals if cal in src]

    if len(calsources):
        # If cal found, set band name from first spw.
        self.band = self.sdm['Receiver'][0].frequencyBand.split('_')[1]

        if len(calsources) > 1:
            # Python 3 print function (the original used a Python 2
            # print statement, a syntax error under py3).
            print('Found multiple flux calibrators: {0}'.format(calsources))
        self.fluxname = calsources[0]
        self.fluxname_full = calsources_full[0]
        print('Set flux calibrator to %s and band to %s.'
              % (self.fluxname_full, self.band))
    else:
        self.fluxname = ''
        self.fluxname_full = ''
        self.band = ''
Uses list of known flux calibrators (with models in CASA) to find full name given in scan.
async def build_attrib_request(submitter_did: str,
                               target_did: str,
                               xhash: Optional[str],
                               raw: Optional[str],
                               enc: Optional[str]) -> str:
    """
    Builds an ATTRIB request. Request to add attribute to a NYM record.

    :param submitter_did: DID of the submitter stored in secured Wallet.
    :param target_did: Target DID as base58-encoded string for 16 or 32 bit DID value.
    :param xhash: (Optional) Hash of attribute data.
    :param raw: (Optional) Json, where key is attribute name and value is attribute value.
    :param enc: (Optional) Encrypted value attribute data.
    :return: Request result as json.
    """

    logger = logging.getLogger(__name__)
    logger.debug("build_attrib_request: >>> submitter_did: %r, target_did: %r, hash: %r, raw: %r, enc: %r",
                 submitter_did,
                 target_did,
                 xhash,
                 raw,
                 enc)

    # The ctypes callback is created once and cached on the function object
    # so repeated calls reuse the same callback instance.
    if not hasattr(build_attrib_request, "cb"):
        logger.debug("build_attrib_request: Creating callback")
        build_attrib_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    # Encode required arguments; optional ones map to NULL when absent.
    c_submitter_did = c_char_p(submitter_did.encode('utf-8'))
    c_target_did = c_char_p(target_did.encode('utf-8'))
    c_hash = c_char_p(xhash.encode('utf-8')) if xhash is not None else None
    c_raw = c_char_p(raw.encode('utf-8')) if raw is not None else None
    c_enc = c_char_p(enc.encode('utf-8')) if enc is not None else None

    request_json = await do_call('indy_build_attrib_request',
                                 c_submitter_did,
                                 c_target_did,
                                 c_hash,
                                 c_raw,
                                 c_enc,
                                 build_attrib_request.cb)

    res = request_json.decode()
    logger.debug("build_attrib_request: <<< res: %r", res)
    return res
Builds an ATTRIB request. Request to add attribute to a NYM record. :param submitter_did: DID of the submitter stored in secured Wallet. :param target_did: Target DID as base58-encoded string for 16 or 32 bit DID value. :param xhash: (Optional) Hash of attribute data. :param raw: (Optional) Json, where key is attribute name and value is attribute value. :param enc: (Optional) Encrypted value attribute data. :return: Request result as json.
def getData(self, *statements): """ Get the data corresponding to the display statements. The statements can be AMPL expressions, or entities. It captures the equivalent of the command: .. code-block:: ampl display ds1, ..., dsn; where ds1, ..., dsn are the ``displayStatements`` with which the function is called. As only one DataFrame is returned, the operation will fail if the results of the display statements cannot be indexed over the same set. As a result, any attempt to get data from more than one set, or to get data for multiple parameters with a different number of indexing sets will fail. Args: statements: The display statements to be fetched. Raises: RuntimeError: if the AMPL visualization command does not succeed for one of the reasons listed above. Returns: DataFrame capturing the output of the display command in tabular form. """ # FIXME: only works for the first statement. return lock_and_call( lambda: DataFrame._fromDataFrameRef( self._impl.getData(list(statements), len(statements)) ), self._lock )
Get the data corresponding to the display statements. The statements can be AMPL expressions, or entities. It captures the equivalent of the command: .. code-block:: ampl display ds1, ..., dsn; where ds1, ..., dsn are the ``displayStatements`` with which the function is called. As only one DataFrame is returned, the operation will fail if the results of the display statements cannot be indexed over the same set. As a result, any attempt to get data from more than one set, or to get data for multiple parameters with a different number of indexing sets will fail. Args: statements: The display statements to be fetched. Raises: RuntimeError: if the AMPL visualization command does not succeed for one of the reasons listed above. Returns: DataFrame capturing the output of the display command in tabular form.
def delete(ctx, family_id, individual_id, root):
    """
    Delete a case or individual from the database.

    If no database was found run puzzle init first.
    """
    root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
    if os.path.isfile(root):
        logger.error("'root' can't be a file")
        ctx.abort()

    logger.info("Root directory is: {}".format(root))

    db_path = os.path.join(root, 'puzzle_db.sqlite3')
    logger.info("db path is: {}".format(db_path))

    if not os.path.exists(db_path):
        # logger.warn is deprecated; use warning() (matches the other
        # warnings in this command).
        logger.warning("database not initialized, run 'puzzle init'")
        ctx.abort()

    store = SqlStore(db_path)

    if family_id:
        case_obj = store.case(case_id=family_id)
        if case_obj is None:
            logger.warning("Family {0} does not exist in database"
                           .format(family_id))
            ctx.abort()
        store.delete_case(case_obj)
    elif individual_id:
        ind_obj = store.individual(ind_id=individual_id)
        if ind_obj.ind_id != individual_id:
            # store.individual may return a fallback object; treat a
            # mismatched id as "not found".
            logger.warning("Individual {0} does not exist in database"
                           .format(individual_id))
            ctx.abort()
        store.delete_individual(ind_obj)
    else:
        logger.warning("Please provide a family or individual id")
        ctx.abort()
Delete a case or individual from the database. If no database was found run puzzle init first.
def set_plugin_filepaths(self, filepaths, except_blacklisted=True): """ Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set. """ filepaths = util.to_absolute_paths(filepaths) if except_blacklisted: filepaths = util.remove_from_set(filepaths, self.blacklisted_filepaths) self.plugin_filepaths = filepaths
Sets `filepaths` to the `self.plugin_filepaths`. Recommend passing in absolute filepaths. Method will attempt to convert to absolute paths if they are not already. `filepaths` can be a single object or an iterable. If `except_blacklisted` is `True`, all `filepaths` that have been blacklisted will not be set.
def find(self, resource_id, query=None, **kwargs): """Gets a single resource.""" if query is None: query = {} return self.client._get( self._url(resource_id), query, **kwargs )
Gets a single resource.
def counter_style(self, val, style): """Return counter value in given style.""" if style == 'decimal-leading-zero': if val < 10: valstr = "0{}".format(val) else: valstr = str(val) elif style == 'lower-roman': valstr = _to_roman(val).lower() elif style == 'upper-roman': valstr = _to_roman(val) elif style == 'lower-latin' or style == 'lower-alpha': if 1 <= val <= 26: valstr = chr(val + 96) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'upper-latin' or style == 'upper-alpha': if 1 <= val <= 26: valstr = chr(val + 64) else: log(WARN, 'Counter out of range for latin (must be 1...26)') valstr = str(val) elif style == 'decimal': valstr = str(val) else: log(WARN, u"ERROR: Counter numbering not supported for" u" list type {}. Using decimal.".format( style).encode('utf-8')) valstr = str(val) return valstr
Return counter value in given style.
def unload_module(self, path): '''Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError ''' with self._mutex: if self._obj.unload_module(path) != RTC.RTC_OK: raise FailedToUnloadModuleError(path)
Unload a loaded shared library. Call this function to remove a shared library (e.g. a component) that was previously loaded. @param path The path to the shared library. @raises FailedToUnloadModuleError
def create(args): """ cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories). """ with _catalog(args) as cat: for fname, created, obj in cat.create(args.args[0], {}): args.log.info('{0} -> {1} object {2.id}'.format( fname, 'new' if created else 'existing', obj))
cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories).
def feed_parser(self, data): """Parse received message.""" assert isinstance(data, bytes) self.controller.feed_parser(data)
Parse received message.
def get_gallery_album(self, id): """ Return the gallery album matching the id. Note that an album's id is different from it's id as a gallery album. This makes it possible to remove an album from the gallery and setting it's privacy setting as secret, without compromising it's secrecy. """ url = self._base_url + "/3/gallery/album/{0}".format(id) resp = self._send_request(url) return Gallery_album(resp, self)
Return the gallery album matching the id. Note that an album's id is different from it's id as a gallery album. This makes it possible to remove an album from the gallery and setting it's privacy setting as secret, without compromising it's secrecy.
def create(self, throw_on_exists=False): """ Creates a database defined by the current database object, if it does not already exist and raises a CloudantException if the operation fails. If the database already exists then this method call is a no-op. :param bool throw_on_exists: Boolean flag dictating whether or not to throw a CloudantDatabaseException when attempting to create a database that already exists. :returns: The database object """ if not throw_on_exists and self.exists(): return self resp = self.r_session.put(self.database_url, params={ 'partitioned': TYPE_CONVERTERS.get(bool)(self._partitioned) }) if resp.status_code == 201 or resp.status_code == 202: return self raise CloudantDatabaseException( resp.status_code, self.database_url, resp.text )
Creates a database defined by the current database object, if it does not already exist and raises a CloudantException if the operation fails. If the database already exists then this method call is a no-op. :param bool throw_on_exists: Boolean flag dictating whether or not to throw a CloudantDatabaseException when attempting to create a database that already exists. :returns: The database object
def get_bits( self, count ): """Get an integer containing the next [count] bits from the source.""" result = 0 for i in range( count ): if self.bits_remaining <= 0: self._fill_buffer() if self.bits_reverse: bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0) self.current_bits <<= 1 self.current_bits &= 0xff else: bit = (self.current_bits & 1) self.current_bits >>= 1 self.bits_remaining -= 1 if self.output_reverse: result <<= 1 result |= bit else: result |= bit << i return result
Get an integer containing the next [count] bits from the source.
def update_group(self, group_name, new_group_name=None, new_path=None): """ Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path. """ params = {'GroupName' : group_name} if new_group_name: params['NewGroupName'] = new_group_name if new_path: params['NewPath'] = new_path return self.get_response('UpdateGroup', params)
Updates name and/or path of the specified group. :type group_name: string :param group_name: The name of the new group :type new_group_name: string :param new_group_name: If provided, the name of the group will be changed to this name. :type new_path: string :param new_path: If provided, the path of the group will be changed to this path.
def replace_header(self, header_text): """Replace pip-compile header with custom text""" with open(self.outfile, 'rt') as fp: _, body = self.split_header(fp) with open(self.outfile, 'wt') as fp: fp.write(header_text) fp.writelines(body)
Replace pip-compile header with custom text
def repeat_masker_alignment_iterator(fn, index_friendly=True, verbose=False): """ Iterator for repeat masker alignment files; yields multiple alignment objects. Iterate over a file/stream of full repeat alignments in the repeatmasker format. Briefly, this format is as follows: each record (alignment) begins with a header line (see _rm_parse_header_line documentation for details of header format), followed by the alignment itself (example below) and finally a set of key-value meta-data pairs. The actual alignment looks like this:: chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41 ii v -- v i i v C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42 chr1 42 GACTG 47 v C MER5B#DNA/hAT 43 CACTG 48 The 'C' indicates that its the reverse complement of the consensus. The central string gives information about matches; "-" indicates an insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion (all other substitutions). :param fh: filename or stream-like object to read from. :param index_friendly: if True, we will ensure the file/stream position is before the start of the record when we yield it; this requires the ability to seek within the stream though, so if iterating over a stream wtihout that ability, you'll have to set this to false. Further, this will disable buffering for the file, to ensure file.tell() behaves correctly, so a performance hit will be incurred. :param verbose: if true, output progress messages to stderr. """ # step 1 -- build our iterator for the stream.. 
try: fh = open(fn) except (TypeError): fh = fn iterable = fh if index_friendly: iterable = iter(fh.readline, '') # build progress indicator, if we want one and we're able to if verbose: try: m_fn = ": " + fh.name except TypeError: m_fn = "" try: current = fh.tell() fh.seek(0, 2) total_progress = fh.tell() fh.seek(current) pind = ProgressIndicator(totalToDo=total_progress, messagePrefix="completed", messageSuffix="of processing repeat-masker " "alignment file" + m_fn) except IOError: pind = None old_fh_pos = None new_fh_pos = fh.tell() s1 = None s2 = None s1_name = None s2_name = None s1_start = None s1_end = None s2_start = None s2_end = None meta_data = None alignment_line_counter = 0 alig_l_space = 0 prev_seq_len = 0 rev_comp_match = None remaining_repeat = None remaining_genomic = None for line in iterable: if verbose and pind is not None: pind.done = fh.tell() pind.showProgress() if index_friendly: old_fh_pos = new_fh_pos new_fh_pos = fh.tell() line = line.rstrip() if line.lstrip() == "" and alignment_line_counter % 3 != 1: continue s_pres_split = re.split(r'(\s+)', line) parts = [x for x in s_pres_split if not (x.isspace() or x == "")] n = len(parts) for i in REPEATMASKER_FIELDS_TO_TRIM: if n >= i + 1: parts[i] = parts[i].strip() # decide what to do with this line -- is it a header line, part of the # alignment or a meta-data key-value line if alignment_line_counter % 3 == 1: if (REPEATMASKER_VALIDATE_MUTATIONS and not _rm_is_valid_annotation_line(line)): raise IOError("invalid mutation line: " + line) l_space = _rm_compute_leading_space(s_pres_split) - alig_l_space pad_right = prev_seq_len - (l_space + len(line.strip())) meta_data[ANNOTATION_KEY] += ((' ' * l_space) + line.strip() + (' ' * pad_right)) alignment_line_counter += 1 elif _rm_is_header_line(parts, n): if not (s1 is None and s2 is None and meta_data is None): if ANNOTATION_KEY in meta_data: meta_data[ANNOTATION_KEY] = meta_data[ANNOTATION_KEY].rstrip() if index_friendly: fh.seek(old_fh_pos) ss1 = 
Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos) meta_data = {} s1 = "" s2 = "" s1_name, s2_name = _rm_get_names_from_header(parts) s1_start, s1_end = _rm_get_reference_coords_from_header(parts) s2_start, s2_end = _rm_get_repeat_coords_from_header(parts) rev_comp_match = _rm_is_reverse_comp_match(parts) remaining_repeat = _rm_get_remaining_repeat_from_header(parts) remaining_genomic = _rm_get_remaining_genomic_from_header(parts) _rm_parse_header_line(parts, meta_data) alignment_line_counter = 0 elif _rm_is_alignment_line(parts, s1_name, s2_name): alignment_line_counter += 1 name, seq = _rm_extract_sequence_and_name(parts, s1_name, s2_name) if name == s1_name: s1 += seq elif name == s2_name: s2 += seq alig_l_space = _rm_compute_leading_space_alig(s_pres_split, seq) prev_seq_len = len(seq) else: k, v = _rm_parse_meta_line(parts) meta_data[k] = v if index_friendly: fh.seek(old_fh_pos) ss1 = Sequence(s1_name, s1, s1_start, s1_end, "+", remaining_genomic) s2s = "-" if rev_comp_match else "+" ss2 = Sequence(s2_name, s2, s2_start, s2_end, s2s, remaining_repeat) yield PairwiseAlignment(ss1, ss2, meta_data) if index_friendly: fh.seek(new_fh_pos)
Iterator for repeat masker alignment files; yields multiple alignment objects. Iterate over a file/stream of full repeat alignments in the repeatmasker format. Briefly, this format is as follows: each record (alignment) begins with a header line (see _rm_parse_header_line documentation for details of header format), followed by the alignment itself (example below) and finally a set of key-value meta-data pairs. The actual alignment looks like this:: chr1 11 CCCTGGAGATTCTTATT--AGTGATTTGGGCT 41 ii v -- v i i v C MER5B#DNA/hAT 10 CCCCAGAGATTCTGATTTAATTGGTCTGGGGT 42 chr1 42 GACTG 47 v C MER5B#DNA/hAT 43 CACTG 48 The 'C' indicates that its the reverse complement of the consensus. The central string gives information about matches; "-" indicates an insertion/deletion, "i" a transition (G<->A, C<->T) and "v" a transversion (all other substitutions). :param fh: filename or stream-like object to read from. :param index_friendly: if True, we will ensure the file/stream position is before the start of the record when we yield it; this requires the ability to seek within the stream though, so if iterating over a stream wtihout that ability, you'll have to set this to false. Further, this will disable buffering for the file, to ensure file.tell() behaves correctly, so a performance hit will be incurred. :param verbose: if true, output progress messages to stderr.
def cli(env): """List Reserved Capacity groups.""" manager = CapacityManager(env.client) result = manager.list() table = formatting.Table( ["ID", "Name", "Capacity", "Flavor", "Location", "Created"], title="Reserved Capacity" ) for r_c in result: occupied_string = "#" * int(r_c.get('occupiedInstanceCount', 0)) available_string = "-" * int(r_c.get('availableInstanceCount', 0)) try: flavor = r_c['instances'][0]['billingItem']['description'] # cost = float(r_c['instances'][0]['billingItem']['hourlyRecurringFee']) except KeyError: flavor = "Unknown Billing Item" location = r_c['backendRouter']['hostname'] capacity = "%s%s" % (occupied_string, available_string) table.add_row([r_c['id'], r_c['name'], capacity, flavor, location, r_c['createDate']]) env.fout(table)
List Reserved Capacity groups.
def _get_object_class(cls, class_name): """ :type class_name: str :rtype: core.BunqModel """ class_name = class_name.lstrip(cls.__STRING_FORMAT_UNDERSCORE) if class_name in cls._override_field_map: class_name = cls._override_field_map[class_name] try: return getattr(endpoint, class_name) except AttributeError: pass try: return getattr(object_, class_name) except AttributeError: pass raise BunqException(cls._ERROR_MODEL_NOT_FOUND.format(class_name))
:type class_name: str :rtype: core.BunqModel
def get_available_ip6_for_vip(self, id_evip, name): """ Get and save a available IP in the network ipv6 for vip request :param id_evip: Vip environment identifier. Integer value and greater than zero. :param name: Ip description :return: Dictionary with the following structure: :: {'ip': {'bloco1':<bloco1>, 'bloco2':<bloco2>, 'bloco3':<bloco3>, 'bloco4':<bloco4>, 'bloco5':<bloco5>, 'bloco6':<bloco6>, 'bloco7':<bloco7>, 'bloco8':<bloco8>, 'id':<id>, 'networkipv6':<networkipv6>, 'description':<description>}} :raise IpNotAvailableError: Network dont have available IP for vip environment. :raise EnvironmentVipNotFoundError: Vip environment not registered. :raise UserNotAuthorizedError: User dont have permission to perform operation. :raise InvalidParameterError: Vip environment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database. """ if not is_valid_int_param(id_evip): raise InvalidParameterError( u'Vip environment identifier is invalid or was not informed.') url = 'ip/availableip6/vip/' + str(id_evip) + "/" ip_map = dict() ip_map['id_evip'] = id_evip ip_map['name'] = name code, xml = self.submit({'ip_map': ip_map}, 'POST', url) return self.response(code, xml)
Get and save a available IP in the network ipv6 for vip request :param id_evip: Vip environment identifier. Integer value and greater than zero. :param name: Ip description :return: Dictionary with the following structure: :: {'ip': {'bloco1':<bloco1>, 'bloco2':<bloco2>, 'bloco3':<bloco3>, 'bloco4':<bloco4>, 'bloco5':<bloco5>, 'bloco6':<bloco6>, 'bloco7':<bloco7>, 'bloco8':<bloco8>, 'id':<id>, 'networkipv6':<networkipv6>, 'description':<description>}} :raise IpNotAvailableError: Network dont have available IP for vip environment. :raise EnvironmentVipNotFoundError: Vip environment not registered. :raise UserNotAuthorizedError: User dont have permission to perform operation. :raise InvalidParameterError: Vip environment identifier is none or invalid. :raise XMLError: Networkapi failed to generate the XML response. :raise DataBaseError: Networkapi failed to access the database.
def data(request): """Return server side data.""" columns = [ ColumnDT(User.id), ColumnDT(User.name), ColumnDT(Address.description), ColumnDT(func.strftime("%d-%m-%Y", User.birthday)), ColumnDT(User.age) ] query = DBSession.query().select_from(User).join(Address).filter( Address.id > 4) rowTable = DataTables(request.GET, query, columns) return rowTable.output_result()
Return server side data.
def water_bridges(bs_hba, lig_hba, bs_hbd, lig_hbd, water): """Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridged of first degree.""" data = namedtuple('waterbridge', 'a a_orig_idx atype d d_orig_idx dtype h water water_orig_idx distance_aw ' 'distance_dw d_angle w_angle type resnr restype reschain resnr_l restype_l reschain_l protisdon') pairings = [] # First find all acceptor-water pairs with distance within d # and all donor-water pairs with distance within d and angle greater theta lig_aw, prot_aw, lig_dw, prot_hw = [], [], [], [] for w in water: for acc1 in lig_hba: dist = euclidean3d(acc1.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: lig_aw.append((acc1, w, dist)) for acc2 in bs_hba: dist = euclidean3d(acc2.a.coords, w.oxy.coords) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST: prot_aw.append((acc2, w, dist)) for don1 in lig_hbd: dist = euclidean3d(don1.d.coords, w.oxy.coords) d_angle = vecangle(vector(don1.h.coords, don1.d.coords), vector(don1.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: lig_dw.append((don1, w, dist, d_angle)) for don2 in bs_hbd: dist = euclidean3d(don2.d.coords, w.oxy.coords) d_angle = vecangle(vector(don2.h.coords, don2.d.coords), vector(don2.h.coords, w.oxy.coords)) if config.WATER_BRIDGE_MINDIST <= dist <= config.WATER_BRIDGE_MAXDIST \ and d_angle > config.WATER_BRIDGE_THETA_MIN: prot_hw.append((don2, w, dist, d_angle)) for l, p in itertools.product(lig_aw, prot_hw): acc, wl, distance_aw = l don, wd, distance_dw, d_angle = p if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(don.d), 
whichchain(don.d), whichrestype(don.d) resnr_l, reschain_l, restype_l = whichresnumber(acc.a_orig_atom), whichchain( acc.a_orig_atom), whichrestype(acc.a_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l, protisdon=True) pairings.append(contact) for p, l in itertools.product(prot_aw, lig_dw): acc, wl, distance_aw = p don, wd, distance_dw, d_angle = l if not wl.oxy == wd.oxy: continue # Same water molecule and angle within omega w_angle = vecangle(vector(acc.a.coords, wl.oxy.coords), vector(wl.oxy.coords, don.h.coords)) if not config.WATER_BRIDGE_OMEGA_MIN < w_angle < config.WATER_BRIDGE_OMEGA_MAX: continue resnr, reschain, restype = whichresnumber(acc.a), whichchain(acc.a), whichrestype(acc.a) resnr_l, reschain_l, restype_l = whichresnumber(don.d_orig_atom), whichchain( don.d_orig_atom), whichrestype(don.d_orig_atom) contact = data(a=acc.a, a_orig_idx=acc.a_orig_idx, atype=acc.a.type, d=don.d, d_orig_idx=don.d_orig_idx, dtype=don.d.type, h=don.h, water=wl.oxy, water_orig_idx=wl.oxy_orig_idx, distance_aw=distance_aw, distance_dw=distance_dw, d_angle=d_angle, w_angle=w_angle, type='first_deg', resnr=resnr, restype=restype, reschain=reschain, restype_l=restype_l, reschain_l=reschain_l, resnr_l=resnr_l, protisdon=False) pairings.append(contact) return filter_contacts(pairings)
Find water-bridged hydrogen bonds between ligand and protein. For now only considers bridged of first degree.
def merge(a_intervals, b_intervals, op): """ Merge two lists of intervals according to the boolean function op ``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals). This operation keeps the resulting interval set consistent. Parameters ---------- a_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array b_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array op : `function` Lambda function taking two params and returning the result of the operation between these two params. Exemple : lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and ``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and ``b_intervals``. Returns ------- array : `numpy.ndarray` a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``. """ a_endpoints = a_intervals.flatten().tolist() b_endpoints = b_intervals.flatten().tolist() sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1 a_endpoints += [sentinel] b_endpoints += [sentinel] a_index = 0 b_index = 0 res = [] scan = min(a_endpoints[0], b_endpoints[0]) while scan < sentinel: in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2)) in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2)) in_res = op(in_a, in_b) if in_res ^ (len(res) % 2): res += [scan] if scan == a_endpoints[a_index]: a_index += 1 if scan == b_endpoints[b_index]: b_index += 1 scan = min(a_endpoints[a_index], b_endpoints[b_index]) return np.asarray(res).reshape((-1, 2))
Merge two lists of intervals according to the boolean function op ``a_intervals`` and ``b_intervals`` need to be sorted and consistent (no overlapping intervals). This operation keeps the resulting interval set consistent. Parameters ---------- a_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array b_intervals : `~numpy.ndarray` A sorted merged list of intervals represented as a N x 2 numpy array op : `function` Lambda function taking two params and returning the result of the operation between these two params. Exemple : lambda in_a, in_b: in_a and in_b describes the intersection of ``a_intervals`` and ``b_intervals`` whereas lambda in_a, in_b: in_a or in_b describes the union of ``a_intervals`` and ``b_intervals``. Returns ------- array : `numpy.ndarray` a N x 2 numpy containing intervals resulting from the op between ``a_intervals`` and ``b_intervals``.
def buffer_list(editor): """ List all buffers. """ def handler(): wa = editor.window_arrangement for info in wa.list_open_buffers(): char = '%' if info.is_active else '' eb = info.editor_buffer print(' %3i %-2s %-20s line %i' % ( info.index, char, eb.location, (eb.buffer.document.cursor_position_row + 1))) six.moves.input('\nPress ENTER to continue...') run_in_terminal(handler)
List all buffers.
def fromString(strdata): """ Generates profile data from the inputed string data. :param strdata | <str> :return <XViewProfile> """ if strdata: try: xprofile = ElementTree.fromstring(nativestring(strdata)) except ExpatError, err: logger.exception(str(err)) return XViewProfile() return XViewProfile.fromXml(xprofile) logger.warning('Blank profile data provided.') return XViewProfile()
Generates profile data from the inputed string data. :param strdata | <str> :return <XViewProfile>
def create(model, count, *args, **kwargs): ''' Create *count* instances of *model* using the either an appropiate autofixture that was :ref:`registry <registry>` or fall back to the default:class:`AutoFixture` class. *model* can be a model class or its string representation (e.g. ``"app.ModelClass"``). All positional and keyword arguments are passed to the autofixture constructor. It is demonstrated in the example below which will create ten superusers:: import autofixture admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True}) .. note:: See :ref:`AutoFixture` for more information. :func:`create` will return a list of the created objects. ''' from .compat import get_model if isinstance(model, string_types): model = get_model(*model.split('.', 1)) if model in REGISTRY: autofixture_class = REGISTRY[model] else: autofixture_class = AutoFixture # Get keyword arguments that the create_one method accepts and pass them # into create_one instead of AutoFixture.__init__ argnames = set(getargnames(autofixture_class.create_one)) argnames -= set(['self']) create_kwargs = {} for argname in argnames: if argname in kwargs: create_kwargs[argname] = kwargs.pop(argname) autofixture = autofixture_class(model, *args, **kwargs) return autofixture.create(count, **create_kwargs)
Create *count* instances of *model* using the either an appropiate autofixture that was :ref:`registry <registry>` or fall back to the default:class:`AutoFixture` class. *model* can be a model class or its string representation (e.g. ``"app.ModelClass"``). All positional and keyword arguments are passed to the autofixture constructor. It is demonstrated in the example below which will create ten superusers:: import autofixture admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True}) .. note:: See :ref:`AutoFixture` for more information. :func:`create` will return a list of the created objects.
def get_fmri(name, **kwargs): ''' Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash ''' if name.startswith('pkg://'): # already full fmri return name cmd = ['/bin/pkg', 'list', '-aHv', name] # there can be more packages matching the name lines = __salt__['cmd.run_stdout'](cmd).splitlines() if not lines: # empty string = package not found return '' ret = [] for line in lines: ret.append(_ips_get_pkgname(line)) return ret
Returns FMRI from partial name. Returns empty string ('') if not found. In case of multiple match, the function returns list of all matched packages. CLI Example: .. code-block:: bash salt '*' pkg.get_fmri bash
def interpret(code, in_vars): """Try to evaluate the given code, otherwise execute it.""" try: result = eval(code, in_vars) except SyntaxError: pass # exec code outside of exception context else: if result is not None: print(ascii(result)) return # don't also exec code exec_func(code, in_vars)
Try to evaluate the given code, otherwise execute it.
def brightness_to_hex(self, level): """Convert numeric brightness percentage into hex for insteon""" level_int = int(level) new_int = int((level_int * 255)/100) new_level = format(new_int, '02X') self.logger.debug("brightness_to_hex: %s to %s", level, str(new_level)) return str(new_level)
Convert numeric brightness percentage into hex for insteon
def to_array(self): """Return a 1-dimensional |numpy| |numpy.ndarray| with six entries defining the actual date (year, month, day, hour, minute, second). >>> from hydpy import Date >>> Date('1992-10-8 15:15:42').to_array() array([ 1992., 10., 8., 15., 15., 42.]) .. note:: The date defined by the returned |numpy.ndarray| does not include any time zone information and corresponds to |Options.utcoffset|, which defaults to UTC+01:00. """ return numpy.array([self.year, self.month, self.day, self.hour, self.minute, self.second], dtype=float)
Return a 1-dimensional |numpy| |numpy.ndarray| with six entries defining the actual date (year, month, day, hour, minute, second). >>> from hydpy import Date >>> Date('1992-10-8 15:15:42').to_array() array([ 1992., 10., 8., 15., 15., 42.]) .. note:: The date defined by the returned |numpy.ndarray| does not include any time zone information and corresponds to |Options.utcoffset|, which defaults to UTC+01:00.
def group_content(content, namespace, grpname, grpnodetype): """Group the given content in the given namespace under a node of type grpnodetype with the name grpname :param content: the nodes to group :type content: :class:`list` :param namespace: the namespace to use :type namespace: str | None :param grpname: the name of the new grpnode :type grpname: str :param grpnodetype: the nodetype for the grpnode :type grpnodetype: str :returns: the created group node :rtype: str :raises: None """ with common.preserve_namespace(namespace): grpnode = cmds.createNode(grpnodetype, name=grpname) # create grp node cmds.group(content, uag=grpnode) # group content return grpnode
Group the given content in the given namespace under a node of type grpnodetype with the name grpname :param content: the nodes to group :type content: :class:`list` :param namespace: the namespace to use :type namespace: str | None :param grpname: the name of the new grpnode :type grpname: str :param grpnodetype: the nodetype for the grpnode :type grpnodetype: str :returns: the created group node :rtype: str :raises: None
def to_dict(self, properties=None): """Return a dictionary containing Compound data. Optionally specify a list of the desired properties. synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is because they each require an extra request. """ if not properties: skip = {'aids', 'sids', 'synonyms'} properties = [p for p in dir(Compound) if isinstance(getattr(Compound, p), property) and p not in skip] return {p: [i.to_dict() for i in getattr(self, p)] if p in {'atoms', 'bonds'} else getattr(self, p) for p in properties}
Return a dictionary containing Compound data. Optionally specify a list of the desired properties. synonyms, aids and sids are not included unless explicitly specified using the properties parameter. This is because they each require an extra request.
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None): """Same as shell_out but doesn't raise if the cmd exits badly.""" try: return shell_out(cmd, stderr=stderr, cwd=cwd) except CalledProcessError as c: return _clean_output(c.output)
Same as shell_out but doesn't raise if the cmd exits badly.
def find_loader(fullname): """Find a PEP 302 "loader" object for fullname If fullname contains dots, path must be the containing package's __path__. Returns None if the module cannot be found or imported. This function uses iter_importers(), and is thus subject to the same limitations regarding platform-specific special import locations such as the Windows registry. """ for importer in iter_importers(fullname): loader = importer.find_module(fullname) if loader is not None: return loader return None
Find a PEP 302 "loader" object for fullname If fullname contains dots, path must be the containing package's __path__. Returns None if the module cannot be found or imported. This function uses iter_importers(), and is thus subject to the same limitations regarding platform-specific special import locations such as the Windows registry.
def sighash_all(self, index=0, script=None, prevout_value=None, anyone_can_pay=False):
    """SproutTx, int, byte-like, byte-like, bool -> bytearray

    Generates the hash to be signed with SIGHASH_ALL.

    https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
    """
    # Networks with a fork id (e.g. BCH-style) use the BIP143-like scheme.
    if riemann.network.FORKID is not None:
        return self._sighash_forkid(
            index=index,
            script=script,
            prevout_value=prevout_value,
            sighash_type=shared.SIGHASH_ALL,
            anyone_can_pay=anyone_can_pay)

    tx_copy = self._sighash_prep(index=index, script=script)

    if anyone_can_pay:
        return self._sighash_anyone_can_pay(
            index=index, copy_tx=tx_copy, sighash_type=shared.SIGHASH_ALL)

    return self._sighash_final_hashing(tx_copy, shared.SIGHASH_ALL)
SproutTx, int, byte-like, byte-like, bool -> bytearray Sighashes suck Generates the hash to be signed with SIGHASH_ALL https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
def get_homology_models(self):
    """DictList: Return a DictList of all homology models in self.structures"""
    # Keep only non-experimental structures...
    models = (s for s in self.structures if not s.is_experimental)
    if self.representative_structure:
        # ...and drop the representative structure itself, if one is set.
        rep_id = self.representative_structure.id
        models = (s for s in models if s.id != rep_id)
    return DictList(models)
DictList: Return a DictList of all homology models in self.structures
def from_cap(cls, theta, lwin, clat=None, clon=None, nwin=None, theta_degrees=True, coord_degrees=True, dj_matrix=None, weights=None):
    """
    Construct spherical cap localization windows.

    Usage
    -----
    x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees,
                                        coord_degrees, dj_matrix, weights])

    Returns
    -------
    x : SHWindow class instance

    Parameters
    ----------
    theta : float
        Angular radius of the spherical cap localization domain (default
        in degrees).
    lwin : int
        Spherical harmonic bandwidth of the localization windows.
    clat, clon : float, optional, default = None
        Latitude and longitude of the center of the rotated spherical cap
        localization windows (default in degrees).
    nwin : int, optional, default (lwin+1)**2
        Number of localization windows.
    theta_degrees : bool, optional, default = True
        True if theta is in degrees.
    coord_degrees : bool, optional, default = True
        True if clat and clon are in degrees.
    dj_matrix : ndarray, optional, default = None
        The djpi2 rotation matrix computed by a call to djpi2.
    weights : ndarray, optional, default = None
        Taper weights used with the multitaper spectral analyses.
    """
    # SHReturnTapers expects the cap angle in radians.
    cap_angle = _np.radians(theta) if theta_degrees else theta
    tapers, eigenvalues, taper_order = _shtools.SHReturnTapers(cap_angle, lwin)

    return SHWindowCap(theta, tapers, eigenvalues, taper_order, clat, clon,
                       nwin, theta_degrees, coord_degrees, dj_matrix,
                       weights, copy=False)
Construct spherical cap localization windows. Usage ----- x = SHWindow.from_cap(theta, lwin, [clat, clon, nwin, theta_degrees, coord_degrees, dj_matrix, weights]) Returns ------- x : SHWindow class instance Parameters ---------- theta : float Angular radius of the spherical cap localization domain (default in degrees). lwin : int Spherical harmonic bandwidth of the localization windows. clat, clon : float, optional, default = None Latitude and longitude of the center of the rotated spherical cap localization windows (default in degrees). nwin : int, optional, default (lwin+1)**2 Number of localization windows. theta_degrees : bool, optional, default = True True if theta is in degrees. coord_degrees : bool, optional, default = True True if clat and clon are in degrees. dj_matrix : ndarray, optional, default = None The djpi2 rotation matrix computed by a call to djpi2. weights : ndarray, optional, default = None Taper weights used with the multitaper spectral analyses.
def clean_output_files(self, follow_parents=True):
    """
    This method is called when the task reaches S_OK. It removes all the output files
    produced by the task that are not needed by its children as well as the output files
    produced by its parents if no other node needs them.

    Args:
        follow_parents: If true, the output files of the parents nodes will be removed if possible.

    Return:
        list with the absolute paths of the files that have been removed.
    """
    paths = []
    if self.status != self.S_OK:
        logger.warning("Calling task.clean_output_files on a task whose status != S_OK")

    # Remove all files in tmpdir.
    self.tmpdir.clean()

    # Find the file extensions that should be preserved since these files are still
    # needed by the children who haven't reached S_OK
    except_exts = set()
    for child in self.get_children():
        if child.status == self.S_OK:
            continue
        # Find the position of self in child.deps and add the extensions.
        i = [dep.node for dep in child.deps].index(self)
        except_exts.update(child.deps[i].exts)

    # Remove the files in the outdir of the task but keep except_exts.
    exts = self.gc.exts.difference(except_exts)
    #print("Will remove its extensions: ", exts)
    paths += self.outdir.remove_exts(exts)
    if not follow_parents:
        return paths

    # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
    for parent in self.get_parents():

        # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
        # e.g {"WFK": [node1, node2]}
        ext2nodes = collections.defaultdict(list)
        for child in parent.get_children():
            if child.status == child.S_OK:
                continue
            i = [d.node for d in child.deps].index(parent)
            for ext in child.deps[i].exts:
                ext2nodes[ext].append(child)

        # Remove extension only if no node depends on it!
        # (extensions with a non-empty dependent list are kept)
        except_exts = [k for k, lst in ext2nodes.items() if lst]
        exts = self.gc.exts.difference(except_exts)
        #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
        paths += parent.outdir.remove_exts(exts)

    self.history.info("Removed files: %s" % paths)
    return paths
This method is called when the task reaches S_OK. It removes all the output files produced by the task that are not needed by its children as well as the output files produced by its parents if no other node needs them. Args: follow_parents: If true, the output files of the parents nodes will be removed if possible. Return: list with the absolute paths of the files that have been removed.
def plot_pca_component_variance(clf, title='PCA Component Explained Variances', target_explained_variance=0.75, ax=None, figsize=None, title_fontsize="large", text_fontsize="medium"):
    """Plots PCA components' explained variance ratios. (new in v0.2.2)

    Args:
        clf: PCA instance that has the ``explained_variance_ratio_`` attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "PCA Component Explained Variances"

        target_explained_variance (float, optional): Looks for the minimum
            number of principal components that satisfies this value and
            emphasizes it on the plot. Defaults to 0.75

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> pca = PCA(random_state=1)
        >>> pca.fit(X)
        >>> skplt.decomposition.plot_pca_component_variance(pca)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_pca_component_variance.png
           :align: center
           :alt: PCA Component variances
    """
    if not hasattr(clf, 'explained_variance_ratio_'):
        raise TypeError('"clf" does not have explained_variance_ratio_ '
                        'attribute. Has the PCA been fitted?')

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    # Running total of explained variance, component by component.
    cumulative_sum_ratios = np.cumsum(clf.explained_variance_ratio_)

    # Magic code for figuring out closest value to target_explained_variance
    idx = np.searchsorted(cumulative_sum_ratios, target_explained_variance)

    # Prepend 0 so the curve starts at (0, 0): "zero components explain 0%".
    ax.plot(range(len(clf.explained_variance_ratio_) + 1),
            np.concatenate(([0], np.cumsum(clf.explained_variance_ratio_))),
            '*-')
    ax.grid(True)
    ax.set_xlabel('First n principal components', fontsize=text_fontsize)
    ax.set_ylabel('Explained variance ratio of first n components',
                  fontsize=text_fontsize)
    ax.set_ylim([-0.02, 1.02])
    if idx < len(cumulative_sum_ratios):
        # Highlight the first component count reaching the target variance.
        ax.plot(idx+1, cumulative_sum_ratios[idx], 'ro',
                label='{0:0.3f} Explained variance ratio for '
                'first {1} components'.format(cumulative_sum_ratios[idx],
                                              idx+1),
                markersize=4, markeredgewidth=4)
        ax.axhline(cumulative_sum_ratios[idx],
                   linestyle=':', lw=3, color='black')
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc="best", fontsize=text_fontsize)

    return ax
Plots PCA components' explained variance ratios. (new in v0.2.2) Args: clf: PCA instance that has the ``explained_variance_ratio_`` attribute. title (string, optional): Title of the generated plot. Defaults to "PCA Component Explained Variances" target_explained_variance (float, optional): Looks for the minimum number of principal components that satisfies this value and emphasizes it on the plot. Defaults to 0.75 ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot as skplt >>> pca = PCA(random_state=1) >>> pca.fit(X) >>> skplt.decomposition.plot_pca_component_variance(pca) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_pca_component_variance.png :align: center :alt: PCA Component variances
def _get_account(self, address):
    """Get account by address.

    :param address: hex-encoded account address (optionally ``0x``-prefixed)
    :return: the account object fetched (and cached) from the head state
    """
    head_state = self._get_head_state()
    # Strip a possible "0x" prefix, then decode the hex string to raw bytes.
    raw_address = binascii.a2b_hex(utils.remove_0x_head(address))
    return head_state.get_and_cache_account(raw_address)
Get account by address. :param address: hex-encoded account address, optionally ``0x``-prefixed :return: the account object retrieved from the current head state
def _handle_port_request(self, client_data, writer):
    """Given a port request body, parse it and respond appropriately.

    Args:
        client_data: The request bytes from the client.
        writer: The asyncio Writer for the response to be written to.
    """
    try:
        pid = int(client_data)
    except ValueError as error:
        # Malformed request body; count it and bail out silently.
        self._client_request_errors += 1
        log.warning('Could not parse request: %s', error)
        return

    log.info('Request on behalf of pid %d.', pid)
    log.info('cmdline: %s', _get_process_command_line(pid))

    if not _should_allocate_port(pid):
        self._denied_allocations += 1
        return

    port = self._port_pool.get_port_for_process(pid)
    if port <= 0:
        # Pool could not provide a port for this process.
        self._denied_allocations += 1
        return

    self._total_allocations += 1
    writer.write('{:d}\n'.format(port).encode('utf-8'))
    log.debug('Allocated port %d to pid %d', port, pid)
Given a port request body, parse it and respond appropriately. Args: client_data: The request bytes from the client. writer: The asyncio Writer for the response to be written to.
def __ConstructQueryParams(self, query_params, request, global_params): """Construct a dictionary of query parameters for this request.""" # First, handle the global params. global_params = self.__CombineGlobalParams( global_params, self.__client.global_params) global_param_names = util.MapParamNames( [x.name for x in self.__client.params_type.all_fields()], self.__client.params_type) global_params_type = type(global_params) query_info = dict( (param, self.__FinalUrlValue(getattr(global_params, param), getattr(global_params_type, param))) for param in global_param_names) # Next, add the query params. query_param_names = util.MapParamNames(query_params, type(request)) request_type = type(request) query_info.update( (param, self.__FinalUrlValue(getattr(request, param, None), getattr(request_type, param))) for param in query_param_names) query_info = dict((k, v) for k, v in query_info.items() if v is not None) query_info = self.__EncodePrettyPrint(query_info) query_info = util.MapRequestParams(query_info, type(request)) return query_info
Construct a dictionary of query parameters for this request.
def dfs_tree(graph, start=0):
    """DFS, build DFS tree in unweighted graph

    :param graph: directed graph in listlist or listdict format
    :param int start: source vertex
    :returns: precedence table
    :complexity: `O(|V|+|E|)`
    """
    prec = [None] * len(graph)
    stack = [start]          # iterative DFS; a stack makes the loop depth-first
    while stack:
        vertex = stack.pop()
        for successor in graph[vertex]:
            if prec[successor] is None:
                # First time we reach this vertex: record its tree parent.
                prec[successor] = vertex
                stack.append(successor)
    return prec
DFS, build DFS tree in unweighted graph :param graph: directed graph in listlist or listdict format :param int start: source vertex :returns: precedence table :complexity: `O(|V|+|E|)`
def generate_new_id(self):
    """generate new id and event hook for new Individual"""
    # Each individual gets a fresh Event alongside its id.
    self.events.append(Event())
    new_id = self.indiv_counter
    self.indiv_counter = new_id + 1
    return new_id
Generate a new id and an event hook for a new Individual.
def import_schema_to_json(name, store_it=False):
    """
    loads the given schema name
    from the local filesystem
    and puts it into a store if it is not in there yet

    :param name: schema name (without the ``.json`` extension)
    :param store_it: if set to True, stores the contents
    :return: the parsed schema as a dict
    :raises SalesKingException: if the schema file is missing or empty
    """
    schema_file = u"%s.json" % name
    file_path = os.path.join(SCHEMA_ROOT, schema_file)
    log.debug(u"trying to load %s " % file_path)
    try:
        # BUG FIX: the original used Python-2-only ``except IOError, e``
        # syntax and never closed the file handle; use a context manager.
        with open(file_path, "r") as fp:
            schema_text = fp.read()
    except IOError as e:
        log.error(u"file not found %s" % e)
        msg = "Could not find schema file. %s" % file_path
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    schema = json.loads(schema_text)
    if schema is None:
        msg = "loading failed foo %s" % name
        raise SalesKingException("SCHEMA_NOT_FOUND", msg)
    return schema
loads the given schema name from the local filesystem and puts it into a store if it is not in there yet :param name: :param store_it: if set to True, stores the contents :return:
def end_output(self, **kwargs):
    """Write end of checking info as HTML."""
    # Emit each optional trailing section only if it is configured.
    for part, emit in (("stats", self.write_stats),
                       ("outro", self.write_outro)):
        if self.has_part(part):
            emit()
    self.close_fileoutput()
Write end of checking info as HTML.
def next(self):
    """Returns the next input from this input reader as (ZipInfo, opener) tuple.

    Returns:
      The next input from this input reader, in the form of a 2-tuple.
      The first element of the tuple is a zipfile.ZipInfo object.
      The second element of the tuple is a zero-argument function that, when
      called, returns the complete body of the file.
    """
    if not self._zip:
        # Lazily open the archive on first access.
        self._zip = zipfile.ZipFile(self._reader(self._blob_key))
        # Keep only our slice of entries, reversed so pop() yields in order.
        self._entries = self._zip.infolist()[self._start_index:self._end_index]
        self._entries.reverse()
    if not self._entries:
        raise StopIteration()
    entry = self._entries.pop()
    self._start_index += 1
    return entry, (lambda: self._read(entry))
Returns the next input from this input reader as (ZipInfo, opener) tuple. Returns: The next input from this input reader, in the form of a 2-tuple. The first element of the tuple is a zipfile.ZipInfo object. The second element of the tuple is a zero-argument function that, when called, returns the complete body of the file.
def simxGetInMessageInfo(clientID, infoType):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Output parameter that the C library fills in by reference.
    info = ct.c_int()
    # Return both the call's status code and the retrieved info value.
    return c_GetInMessageInfo(clientID, infoType, ct.byref(info)), info.value
Please have a look at the function description/documentation in the V-REP user manual
def _compute_type_url(klass, prefix=_GOOGLE_APIS_PREFIX):
    """Compute a type URL for a klass.

    :type klass: type
    :param klass: class to be used as a factory for the given type

    :type prefix: str
    :param prefix: URL prefix for the type

    :rtype: str
    :returns: the URL, prefixed as appropriate
    """
    # The protobuf descriptor carries the fully-qualified message name.
    return "{}/{}".format(prefix, klass.DESCRIPTOR.full_name)
Compute a type URL for a klass. :type klass: type :param klass: class to be used as a factory for the given type :type prefix: str :param prefix: URL prefix for the type :rtype: str :returns: the URL, prefixed as appropriate
def _update_zone_bypass_status(self, message=None, status=None, zone=None):
    """
    Uses the provided message to update the zone bypass state.

    :param message: message to use to update
    :type message: :py:class:`~alarmdecoder.messages.Message`
    :param status: bypass status, overrides message bits.
    :type status: bool
    :param zone: zone associated with bypass event
    :type zone: int

    :returns: dictionary {Zone:True|False,...}
        Zone can be None if LRR CID Bypass checking is disabled
        or we do not know what zones but know something is bypassed.
    """
    bypass_status = status
    if isinstance(message, Message):
        # A real panel message overrides any explicitly-passed status.
        bypass_status = message.zone_bypassed

    if bypass_status is None:
        return

    old_bypass_status = self._bypass_status.get(zone, None)

    if bypass_status != old_bypass_status:
        if bypass_status == False and zone is None:
            # A global "bypass cleared" with no zone resets all zones.
            self._bypass_status = {}
        else:
            self._bypass_status[zone] = bypass_status

    # Fire the event on any real transition; the exact condition below is
    # intricate — NOTE(review): it fires when a previous state existed, when
    # there is no message, or on a first-seen active bypass.
    if old_bypass_status is not None or message is None or (old_bypass_status is None and bypass_status is True):
        self.on_bypass(status=bypass_status, zone=zone)

    return bypass_status
Uses the provided message to update the zone bypass state. :param message: message to use to update :type message: :py:class:`~alarmdecoder.messages.Message` :param status: bypass status, overrides message bits. :type status: bool :param zone: zone associated with bypass event :type zone: int :returns: dictionary {Zone:True|False,...} Zone can be None if LRR CID Bypass checking is disabled or we do not know what zones but know something is bypassed.
def append_to_arg_count(self, data):
    """
    Add digit to the input argument.

    :param data: the typed digit as string
    """
    assert data in '-0123456789'
    current = self._arg

    if data == '-':
        # A minus sign is only valid as the very first character.
        assert current is None or current == '-'
        new_arg = data
    elif current is None:
        new_arg = data
    else:
        new_arg = "%s%s" % (current, data)

    self.input_processor.arg = new_arg
Add digit to the input argument. :param data: the typed digit as string
def create_salt(length: int = 128) -> bytes:
    """
    Create a new salt

    :param int length: How many bytes should the salt be long?
    :return: The salt
    :rtype: bytes
    """
    # Use the OS CSPRNG via SystemRandom; one byte per draw.
    rng = SystemRandom()
    return bytes(rng.randint(0, 255) for _ in range(length))
Create a new salt :param int length: How many bytes should the salt be long? :return: The salt :rtype: bytes
def get_tx_power(self, tx_power):
    """Validates tx_power against self.tx_power_table

    @param tx_power: dict mapping antenna id to an index into the
        self.tx_power_table list; an index of 0 selects the max power
        from self.tx_power_table
    @return: a dict {antenna: (tx_power_index, power_dbm)} from
        self.tx_power_table
    @raise: LLRPError if the requested index is out of range
    """
    if not self.tx_power_table:
        logger.warn('get_tx_power(): tx_power_table is empty!')
        return {}
    logger.debug('requested tx_power: %s', tx_power)
    min_power = self.tx_power_table.index(min(self.tx_power_table))
    max_power = self.tx_power_table.index(max(self.tx_power_table))

    ret = {}
    # BUG FIX: the loop variable no longer shadows the ``tx_power`` dict
    # parameter, and the error path below no longer references the
    # nonexistent ``self.tx_power`` (which raised AttributeError instead
    # of the documented LLRPError).
    for antid, power_idx in tx_power.items():
        if power_idx == 0:
            # An index of 0 means "use maximum power".
            max_power_dbm = max(self.tx_power_table)
            power_idx = self.tx_power_table.index(max_power_dbm)
            ret[antid] = (power_idx, max_power_dbm)
            continue
        try:
            power_dbm = self.tx_power_table[power_idx]
        except IndexError:
            raise LLRPError('Invalid tx_power for antenna {}: '
                            'requested={}, min_available={}, '
                            'max_available={}'.format(
                                antid, power_idx, min_power, max_power))
        ret[antid] = (power_idx, power_dbm)
    return ret
Validates tx_power against self.tx_power_table @param tx_power: index into the self.tx_power_table list; if tx_power is 0 then the max power from self.tx_power_table @return: a dict {antenna: (tx_power_index, power_dbm)} from self.tx_power_table @raise: LLRPError if the requested index is out of range
def send(self, message):
    """Send our message

    Args:
        message (str): The message to be sent.

    Returns:
        dict: The JSON-decoded body of the API response.

    Raises:
        RuntimeError: On any API failure other than the known
            400 "recipient does not exist" case.
    """
    body = {
        'notificationType': self._notification_type,
        'priority': self._priority,
        'isOrganization': self._is_organization,
        'message': message,
    }

    if self._recipients:
        body['recipients'] = self._recipients

    self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))

    # create our tcex resource
    # (BUG FIX: the original had a redundant double assignment
    #  ``resource = resource = ...``)
    resource = self._tcex.resource('Notification')
    resource.http_method = 'POST'
    resource.body = json.dumps(body)
    results = resource.request()  # do the request

    if results.get('response').status_code == 200:
        # everything worked
        response = results.get('response').json()
    elif results.get('response').status_code == 400:
        # failed..but known... user doesn't exist
        # just return and let calling app handle it
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        response = results.get('response').json()
    else:
        # somekind of unknown error...raise
        err = 'Failed to send notification ({})'.format(results.get('response').text)
        self._tcex.log.error(err)
        raise RuntimeError(err)

    return response
Send our message Args: message (str): The message to be sent. Returns: requests.models.Response: The response from the request.
def Push(self, source_file, device_filename, mtime='0', timeout_ms=None, progress_callback=None, st_mode=None):
    """Push a file or directory to the device.

    Args:
      source_file: Either a filename, a directory or file-like object to push to
                   the device.
      device_filename: Destination on the device to write to.
      mtime: Optional, modification time to set on the file.
      timeout_ms: Expected timeout for any part of the push.
      st_mode: stat mode for filename
      progress_callback: callback method that accepts filename, bytes_written and total_bytes,
                         total_bytes will be -1 for file-like objects
    """
    if isinstance(source_file, str):
        if os.path.isdir(source_file):
            # Directory: create the remote dir and recurse into each entry.
            self.Shell("mkdir " + device_filename)
            for f in os.listdir(source_file):
                self.Push(os.path.join(source_file, f),
                          device_filename + '/' + f,
                          progress_callback=progress_callback)
            return
        source_file = open(source_file, "rb")

    with source_file:
        connection = self.protocol_handler.Open(
            self._handle, destination=b'sync:', timeout_ms=timeout_ms)
        kwargs={}
        if st_mode is not None:
            kwargs['st_mode'] = st_mode
        self.filesync_handler.Push(connection, source_file, device_filename,
                                   mtime=int(mtime),
                                   progress_callback=progress_callback,
                                   **kwargs)
    # NOTE(review): the connection is closed after the with-block that closes
    # the source file — confirm this ordering matches upstream python-adb.
    connection.Close()
Push a file or directory to the device. Args: source_file: Either a filename, a directory or file-like object to push to the device. device_filename: Destination on the device to write to. mtime: Optional, modification time to set on the file. timeout_ms: Expected timeout for any part of the push. st_mode: stat mode for filename progress_callback: callback method that accepts filename, bytes_written and total_bytes, total_bytes will be -1 for file-like objects
def on_press(self, window, key, scancode, action, mods): """ Key handler for key presses. """ # controls for moving position if key == glfw.KEY_W: self.pos[0] -= self._pos_step # dec x elif key == glfw.KEY_S: self.pos[0] += self._pos_step # inc x elif key == glfw.KEY_A: self.pos[1] -= self._pos_step # dec y elif key == glfw.KEY_D: self.pos[1] += self._pos_step # inc y elif key == glfw.KEY_F: self.pos[2] -= self._pos_step # dec z elif key == glfw.KEY_R: self.pos[2] += self._pos_step # inc z # controls for moving orientation elif key == glfw.KEY_Z: drot = rotation_matrix(angle=0.1, direction=[1., 0., 0.])[:3, :3] self.rotation = self.rotation.dot(drot) # rotates x elif key == glfw.KEY_X: drot = rotation_matrix(angle=-0.1, direction=[1., 0., 0.])[:3, :3] self.rotation = self.rotation.dot(drot) # rotates x elif key == glfw.KEY_T: drot = rotation_matrix(angle=0.1, direction=[0., 1., 0.])[:3, :3] self.rotation = self.rotation.dot(drot) # rotates y elif key == glfw.KEY_G: drot = rotation_matrix(angle=-0.1, direction=[0., 1., 0.])[:3, :3] self.rotation = self.rotation.dot(drot) # rotates y elif key == glfw.KEY_C: drot = rotation_matrix(angle=0.1, direction=[0., 0., 1.])[:3, :3] self.rotation = self.rotation.dot(drot) # rotates z elif key == glfw.KEY_V: drot = rotation_matrix(angle=-0.1, direction=[0., 0., 1.])[:3, :3] self.rotation = self.rotation.dot(drot)
Key handler for key presses.
def get_registered_configs(self, instances=None):
    """
    Return the persisted values of all config files registered with the
    config manager.

    :param instances: optional collection of instance names; when given,
        only config files belonging to those instances are returned.
    :return: dict mapping config file path to its persisted config dict
    """
    configs = self.state.get('config_files', {})
    if instances is not None:
        # BUG FIX: the original popped entries from ``configs`` while
        # iterating configs.items(), which raises RuntimeError on Python 3
        # and mutated the persisted state dict in place. Build a filtered
        # copy instead.
        configs = {
            config_file: config
            for config_file, config in configs.items()
            if config['instance_name'] in instances
        }
    return configs
Return the persisted values of all config files registered with the config manager.
def handle_exception(self, exc):
    """
    Handle any exception that occurs, by returning an appropriate response,
    or re-raising the error.
    """
    auth_failure = isinstance(
        exc, (exceptions.NotAuthenticated, exceptions.AuthenticationFailed))
    if auth_failure:
        # WWW-Authenticate header for 401 responses, else coerce to 403
        auth_header = self.get_authenticate_header(self.request)
        if auth_header:
            exc.auth_header = auth_header
        else:
            exc.status_code = status.HTTP_403_FORBIDDEN

    exception_handler = self.settings.EXCEPTION_HANDLER
    context = self.get_exception_handler_context()
    response = exception_handler(exc, context)

    if response is None:
        # The handler declined to deal with it: propagate the original error.
        raise

    response.exception = True
    return response
Handle any exception that occurs, by returning an appropriate response, or re-raising the error.
def list_networks(full_ids=False):
    """
    Lists networks on the Docker remote host, similar to ``docker network ls``.

    :param full_ids: Shows the full network ids. When ``False`` (default) only
      shows the first 12 characters.
    :type full_ids: bool
    """
    _format_output_table(docker_fabric().networks(), NETWORK_COLUMNS, full_ids)
Lists networks on the Docker remote host, similar to ``docker network ls``. :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters. :type full_ids: bool
def update_resource(self, path, data, if_match=None):
    """Update the required resource.

    :param path: resource path to send the PUT request to
    :param data: request body
    :param if_match: optional ETag for a conditional update
    :return: the JSON-decoded body of the service response
    :raises exception.ServiceException: when the response is not valid JSON
    """
    response = self._http_request(
        resource=path, method="PUT", body=data, if_match=if_match)
    try:
        decoded = response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")
    return decoded
Update the required resource.
def inject(fun: Callable) -> Callable:
    """
    A decorator for injection dependencies into functions/methods, based
    on their type annotations.

    .. code-block:: python

        class SomeClass:
            @inject
            def __init__(self, my_dep: DepType) -> None:
                self.my_dep = my_dep

    .. important::

        On the opposite to :class:`~haps.Inject`, dependency is injected
        at the moment of method invocation. In case of decorating
        `__init__`, dependency is injected when `SomeClass` instance is
        created.

    :param fun: callable with annotated parameters
    :return: decorated callable
    """
    signature = inspect.signature(fun)
    # Map parameter name -> annotated type, skipping ``self``.
    injectables: Dict[str, Any] = {
        param_name: parameter.annotation
        for param_name, parameter in signature.parameters.items()
        if param_name != 'self'
    }

    @wraps(fun)
    def _inner(*args, **kwargs):
        container = Container()
        for param_name, param_type in injectables.items():
            # Only resolve dependencies the caller did not supply.
            if param_name not in kwargs:
                kwargs[param_name] = container.get_object(param_type)
        return fun(*args, **kwargs)

    return _inner
A decorator for injection dependencies into functions/methods, based on their type annotations. .. code-block:: python class SomeClass: @inject def __init__(self, my_dep: DepType) -> None: self.my_dep = my_dep .. important:: On the opposite to :class:`~haps.Inject`, dependency is injected at the moment of method invocation. In case of decorating `__init__`, dependency is injected when `SomeClass` instance is created. :param fun: callable with annotated parameters :return: decorated callable
def _load_with_overrides(base) -> Dict[str, str]:
    """Load the config index from *base*, or (re)write its defaults.

    Missing or unreadable config files are regenerated; unknown keys are
    filled in from CONFIG_ELEMENTS defaults. Environment overrides are
    applied last and always win. Returns the resulting index dict.
    """
    should_write = False
    overrides = _get_environ_overrides()
    try:
        index = json.load((base/_CONFIG_FILENAME).open())
    except (OSError, json.JSONDecodeError) as e:
        # No readable config on disk: start from generated defaults.
        sys.stderr.write("Error loading config from {}: {}\nRewriting...\n"
                         .format(str(base), e))
        should_write = True
        index = generate_config_index(overrides)
    for key in CONFIG_ELEMENTS:
        if key.name not in index:
            # A new key was added since this config was written.
            sys.stderr.write(
                f"New config index key {key.name}={key.default}"
                "\nRewriting...\n")
            if key.kind in (ConfigElementType.DIR, ConfigElementType.FILE):
                # Path-like defaults are resolved relative to *base*.
                index[key.name] = base/key.default
            else:
                index[key.name] = key.default
            should_write = True
    if should_write:
        try:
            write_config(index, path=base)
        except Exception as e:
            # Persisting failed; keep going with the in-memory index.
            sys.stderr.write(
                "Error writing config to {}: {}\nProceeding memory-only\n"
                .format(str(base), e))
    index.update(overrides)
    return index
Load an config or write its defaults
def set_encoding (parsobj, attrs):
    """
    Set document encoding for the HTML parser according to the <meta>
    tag attribute information.

    @param attrs: attributes of a <meta> HTML tag
    @type attrs: dict
    @return: None
    """
    charset = attrs.get_true('charset', u'')
    if charset:
        # <meta charset="utf-8">
        # eg. in http://cn.dolphin-browser.com/activity/Dolphinjump
        # Non-ASCII bytes in the charset name are dropped before lowering.
        charset = charset.encode('ascii', 'ignore').lower()
    elif attrs.get_true('http-equiv', u'').lower() == u"content-type":
        # <meta http-equiv="content-type" content="text/html;charset="utf-8">
        charset = attrs.get_true('content', u'')
        charset = charset.encode('ascii', 'ignore').lower()
        # Extract the charset parameter from the content-type value.
        charset = get_ctype_charset(charset)
    # Only switch the parser to encodings we know how to handle.
    if charset and charset in SUPPORTED_CHARSETS:
        parsobj.encoding = charset
Set document encoding for the HTML parser according to the <meta> tag attribute information. @param attrs: attributes of a <meta> HTML tag @type attrs: dict @return: None
def corr(self, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of columns, excluding NA/null values.

    Parameters
    ----------
    method : {'pearson', 'kendall', 'spearman'} or callable
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
        * callable: callable with input two 1d ndarrays
            and returning a float. Note that the returned matrix from corr
            will have 1 along the diagonals and will be symmetric
            regardless of the callable's behavior
            .. versionadded:: 0.24.0

    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result. Currently only available for Pearson
        and Spearman correlation.

    Returns
    -------
    DataFrame
        Correlation matrix.

    See Also
    --------
    DataFrame.corrwith
    Series.corr

    Examples
    --------
    >>> def histogram_intersection(a, b):
    ...     v = np.minimum(a, b).sum().round(decimals=1)
    ...     return v
    >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
    ...                   columns=['dogs', 'cats'])
    >>> df.corr(method=histogram_intersection)
          dogs  cats
    dogs   1.0   0.3
    cats   0.3   1.0
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    idx = cols.copy()
    mat = numeric_df.values

    if method == 'pearson':
        # Fast C implementation handling NaNs pairwise.
        correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
    elif method == 'spearman':
        correl = libalgos.nancorr_spearman(ensure_float64(mat),
                                           minp=min_periods)
    elif method == 'kendall' or callable(method):
        if min_periods is None:
            min_periods = 1
        mat = ensure_float64(mat).T
        corrf = nanops.get_corr_func(method)
        K = len(cols)
        correl = np.empty((K, K), dtype=float)
        mask = np.isfinite(mat)
        # Pure-Python pairwise loop; only the upper triangle is computed
        # and mirrored, since the matrix is symmetric by construction.
        for i, ac in enumerate(mat):
            for j, bc in enumerate(mat):
                if i > j:
                    continue

                valid = mask[i] & mask[j]
                if valid.sum() < min_periods:
                    c = np.nan
                elif i == j:
                    c = 1.
                elif not valid.all():
                    c = corrf(ac[valid], bc[valid])
                else:
                    c = corrf(ac, bc)
                correl[i, j] = c
                correl[j, i] = c
    else:
        raise ValueError("method must be either 'pearson', "
                         "'spearman', 'kendall', or a callable, "
                         "'{method}' was supplied".format(method=method))

    return self._constructor(correl, index=idx, columns=cols)
Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Currently only available for Pearson and Spearman correlation. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith Series.corr Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0
def format_summary(self):
    """Generate a summary string for the progress bar.

    Joins the per-chunk summaries with ``/`` separators.
    """
    return "/".join(
        chunk.format_chunk_summary() for chunk in self._progress_chunks
    )
Generate a summary string for the progress bar.
def get_queryset(self):
    '''
    Recent events are listed in link form.
    '''
    # Events that started within the last 90 days and are either part of
    # a series or a public event.
    cutoff = timezone.now() - timedelta(days=90)
    recent_filter = Q(startTime__gte=cutoff) & (
        Q(series__isnull=False) | Q(publicevent__isnull=False)
    )

    # Hidden/disabled events with no registrations are dropped entirely.
    hidden_statuses = [
        Event.RegStatus.hidden,
        Event.RegStatus.regHidden,
        Event.RegStatus.disabled,
    ]

    qs = Event.objects.filter(recent_filter)
    qs = qs.annotate(count=Count('eventregistration'))
    qs = qs.annotate(**self.get_annotations())
    return qs.exclude(Q(count=0) & Q(status__in=hidden_statuses))
Recent events are listed in link form.
def check_and_set_unreachability(self, hosts, services):
    """
    Check if all dependencies are down, if yes set this object as unreachable.

    TODO: this function does not take execution_failure_criteria into account!

    :param hosts: hosts objects, used to get object in act_depend_of
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get object in act_depend_of
    :type services: alignak.objects.service.Services
    :return: None
    """
    down_states = ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN',
                   'x', 'UNREACHABLE']
    for (dep_id, _, _, _) in self.act_depend_of:
        # Entries in act_depend_of may refer to either a host or a service.
        dep = hosts[dep_id] if dep_id in hosts else services[dep_id]
        if dep.state not in down_states:
            # At least one parent is alive: still reachable, stop early
            # (the original built a full list of booleans first).
            return
    # Every parent is down.  Note: this is also reached when
    # act_depend_of is empty, matching the original behavior.
    self.set_unreachable()
Check whether all dependencies are down; if so, mark this object as unreachable. TODO: this function does not take execution_failure_criteria into account! :param hosts: hosts objects, used to get object in act_depend_of :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of :type services: alignak.objects.service.Services :return: None
def convert_to_decimal(string):
    """
    Decode the exif-gps DMS format into a decimal degree value.

    '[51, 4, 1234/34]' -> 51.076748366013074

    :param string: coordinate triplet as ``[deg, min, sec]`` where the
        minutes and seconds components may be plain integers or
        fractions such as ``1234/34``; brackets are optional.
    :raises ValueError: if the string does not look like a coordinate.
    """
    # Integer or fraction like '1234/34'.  The original pattern used a
    # literal backslash (``\\``) here, so real fractions (which use '/')
    # never matched and valid input raised ValueError.
    number_or_fraction = r'(?:\d{1,10}\s*/\s*\d{1,10}|\d{1,10})'
    pattern = re.compile(
        r'''\[?\s*                # optional opening bracket
            \d{{1,3}}\s*,\s*      # degrees (plain integer, up to 180)
            {0}\s*,\s*            # minutes (integer or fraction)
            {0}\s*                # seconds (integer or fraction) --
                                  # no trailing comma required, unlike
                                  # the original (buggy) pattern
            \]?\s*$               # optional closing bracket
        '''.format(number_or_fraction),
        re.VERBOSE)
    if not pattern.match(string):
        raise ValueError('not a valid exif-gps coordinate: %r' % (string,))

    def _as_number(part):
        # '1234/34' -> 36.29...; '4' -> 4.0
        part = part.strip()
        if '/' in part:
            numerator, denominator = part.split('/')
            return int(numerator) * 1.0 / int(denominator)
        return float(part)

    degrees, minutes, seconds = re.sub(r'[\[\]]', '', string).split(',')
    # Seconds contribute 1/3600 of a degree; the original integer-seconds
    # branch wrongly divided by 60.
    return int(degrees) + _as_number(minutes) / 60 + _as_number(seconds) / 3600
Decode the exif-gps format into a decimal degree value. '[51, 4, 1234/34]' -> 51.076748366
def _find_cf_standard_name_table(self, ds):
    '''
    Parse out the `standard_name_vocabulary` attribute and download that
    version of the cf standard name table.  If the standard name table has
    already been downloaded, use the cached version.  Modifies `_std_names`
    attribute to store standard names.  Returns True if the file exists and
    False if it fails to download.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: bool
    '''
    # Get the standard name vocab
    standard_name_vocabulary = getattr(ds, 'standard_name_vocabulary', '')

    # Try to parse this attribute to get version
    version = None
    try:
        if 'cf standard name table' in standard_name_vocabulary.lower():
            # Strip decoration like parentheses, leading 'v' and commas
            # from each whitespace-separated token.
            version = [s.strip('(').strip(')').strip('v').strip(',')
                       for s in standard_name_vocabulary.split()]
            # This assumes that table version number won't start with 0.
            version = [s for s in version if s.isdigit() and len(s) <= 2
                       and not s.startswith('0')]
            if len(version) > 1:
                # Ambiguous: more than one candidate version token.
                return False
            else:
                version = version[0]
        else:
            # Can't parse the attribute, use the packaged version
            return False
    # usually raised from .lower() with an incompatible (non-string)
    # data type
    except AttributeError:
        warn("Cannot convert standard name table to lowercase. This can "
             "occur if a non-string standard_name_vocabulary global "
             "attribute is supplied")
        return False

    if version.startswith('v'):  # i.e 'v34' -> '34' drop the v
        version = version[1:]

    # If the packaged version is what we're after, then we're good
    if version == self._std_names._version:
        print("Using packaged standard name table v{0}".format(version),
              file=sys.stderr)
        return False

    # Try to download the version specified
    try:
        data_directory = util.create_cached_data_dir()
        location = os.path.join(
            data_directory,
            'cf-standard-name-table-test-{0}.xml'.format(version))
        # Did we already download this before?
        if not os.path.isfile(location):
            util.download_cf_standard_name_table(version, location)
            print("Using downloaded standard name table v{0}".format(version),
                  file=sys.stderr)
        else:
            print("Using cached standard name table v{0} from {1}".format(
                version, location), file=sys.stderr)

        self._std_names = util.StandardNameTable(location)
        return True
    except Exception as e:
        # There was an error downloading the CF table. That's ok, we'll
        # just use the packaged version
        warn("Problem fetching standard name table:\n{0}\n"
             "Using packaged v{1}".format(e, self._std_names._version))
        return False
Parse out the `standard_name_vocabulary` attribute and download that version of the cf standard name table. If the standard name table has already been downloaded, use the cached version. Modifies `_std_names` attribute to store standard names. Returns True if the file exists and False if it fails to download. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: bool
def compute_verify_data(self, con_end, read_or_write,
                        handshake_msg, master_secret):
    """
    Return verify_data based on handshake messages, connection end,
    master secret, and read_or_write position. See RFC 5246, section 7.4.9.

    Every TLS 1.2 cipher suite has a verify_data of length 12.

    Note also: "This PRF with the SHA-256 hash function is used for all
    cipher suites defined in this document and in TLS documents published
    prior to this document when TLS 1.2 is negotiated."
    Cipher suites using SHA-384 were defined later on.
    """
    if self.tls_version < 0x0300:
        # SSLv2 and earlier: no Finished verify_data in this scheme.
        return None
    elif self.tls_version == 0x0300:
        # SSLv3 path: verify_data = MD5-hash || SHA1-hash, computed with
        # the nested pad construction below (no PRF in SSLv3).
        if read_or_write == "write":
            d = {"client": b"CLNT", "server": b"SRVR"}
        else:
            # Reading: the label is the one the *peer* used when writing.
            d = {"client": b"SRVR", "server": b"CLNT"}
        label = d[con_end]

        # SSLv3 inner/outer pads (48 bytes for MD5, 40 for SHA-1).
        sslv3_md5_pad1 = b"\x36" * 48
        sslv3_md5_pad2 = b"\x5c" * 48
        sslv3_sha1_pad1 = b"\x36" * 40
        sslv3_sha1_pad2 = b"\x5c" * 40

        md5 = _tls_hash_algs["MD5"]()
        sha1 = _tls_hash_algs["SHA"]()

        # hash(secret + pad2 + hash(msgs + label + secret + pad1))
        md5_hash = md5.digest(master_secret + sslv3_md5_pad2 +
                              md5.digest(handshake_msg + label +
                                         master_secret + sslv3_md5_pad1))
        sha1_hash = sha1.digest(master_secret + sslv3_sha1_pad2 +
                                sha1.digest(handshake_msg + label +
                                            master_secret + sslv3_sha1_pad1))  # noqa: E501
        verify_data = md5_hash + sha1_hash
    else:
        # TLS 1.0+ path: verify_data = PRF(master_secret, finished_label,
        # Hash(handshake_messages))[0:12].
        if read_or_write == "write":
            d = {"client": "client", "server": "server"}
        else:
            d = {"client": "server", "server": "client"}
        label = ("%s finished" % d[con_end]).encode()

        if self.tls_version <= 0x0302:
            # TLS 1.0/1.1: seed is MD5(msgs) || SHA1(msgs).
            s1 = _tls_hash_algs["MD5"]().digest(handshake_msg)
            s2 = _tls_hash_algs["SHA"]().digest(handshake_msg)
            verify_data = self.prf(master_secret, label, s1 + s2, 12)
        else:
            # TLS 1.2: single hash; suites predating SHA-384 use SHA-256.
            if self.hash_name in ["MD5", "SHA"]:
                h = _tls_hash_algs["SHA256"]()
            else:
                h = _tls_hash_algs[self.hash_name]()
            s = h.digest(handshake_msg)
            verify_data = self.prf(master_secret, label, s, 12)

    return verify_data
Return verify_data based on handshake messages, connection end, master secret, and read_or_write position. See RFC 5246, section 7.4.9. Every TLS 1.2 cipher suite has a verify_data of length 12. Note also: "This PRF with the SHA-256 hash function is used for all cipher suites defined in this document and in TLS documents published prior to this document when TLS 1.2 is negotiated." Cipher suites using SHA-384 were defined later on.