code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
    """Build up the library of all the model components.

    Parameters
    ----------
    diffuse_yaml : str
        Name of the yaml file with the library of diffuse component definitions
    catalog_yaml : str
        Name of the yaml file with the library of catalog split definitions
    binning_yaml : str
        Name of the yaml file with the binning definitions

    Returns
    -------
    dict
        Merged diffuse + catalog component-info dictionaries; also merged
        into ``self._library`` as a side effect.
    """
    ret_dict = {}
    # Binning components drive how diffuse components are split.
    components_dict = Component.build_from_yamlfile(binning_yaml)
    diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,
                                                   DiffuseModelManager=self._dmm,
                                                   library=diffuse_yaml,
                                                   components=components_dict)
    catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,
                                              CatalogSourceManager=self._csm)
    ret_dict.update(diffuse_ret_dict['comp_info_dict'])
    ret_dict.update(catalog_ret_dict['comp_info_dict'])
    self._library.update(ret_dict)
    return ret_dict
Build up the library of all the components Parameters ---------- diffuse_yaml : str Name of the yaml file with the library of diffuse component definitions catalog_yaml : str Name of the yaml file with the library of catalog split definitions binning_yaml : str Name of the yaml file with the binning definitions
def rowsAfterValue(self, value, count):
    """Retrieve some rows at or after a given sort-column value.

    @param value: starting value for the current sort column, or None
        to start from the beginning of the data.
    @param count: maximum number of rows to return.
    @return: list of row data ordered by the current sort column.
    """
    if value is None:
        # No starting point: unconstrained ascending query.
        query = self.inequalityQuery(None, count, True)
    else:
        pyvalue = self._toComparableValue(value)
        currentSortAttribute = self.currentSortColumn.sortAttribute()
        query = self.inequalityQuery(currentSortAttribute >= pyvalue, count, True)
    return self.constructRows(query)
Retrieve some rows at or after a given sort-column value. @param value: Starting value in the index for the current sort column at which to start returning results. Rows with a column value for the current sort column which is greater than or equal to this value will be returned. @type value: Some type compatible with the current sort column, or None, to specify the beginning of the data. @param count: The maximum number of rows to return. @type count: C{int} @return: A list of row data, ordered by the current sort column, beginning at C{value} and containing at most C{count} elements.
def suspend(name, call=None):
    """Suspend a VM using its name (salt-cloud action).

    CLI Example:

    .. code-block:: bash

        salt-cloud -a suspend vmname
    """
    if call != 'action':
        raise SaltCloudSystemExit(
            'The suspend action must be called with '
            '-a or --action.'
        )
    vm_properties = [
        "name",
        "summary.runtime.powerState"
    ]
    vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(),
                                                         vim.VirtualMachine,
                                                         vm_properties)
    for vm in vm_list:
        if vm["name"] == name:
            # Suspending is only meaningful for a powered-on VM.
            if vm["summary.runtime.powerState"] == "poweredOff":
                ret = 'cannot suspend in powered off state'
                log.info('VM %s %s', name, ret)
                return ret
            elif vm["summary.runtime.powerState"] == "suspended":
                ret = 'already suspended'
                log.info('VM %s %s', name, ret)
                return ret
            try:
                log.info('Suspending VM %s', name)
                task = vm["object"].Suspend()
                salt.utils.vmware.wait_for_task(task, name, 'suspend')
            except Exception as exc:
                log.error(
                    'Error while suspending VM %s: %s',
                    name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                return 'failed to suspend'
            return 'suspended'
To suspend a VM using its name CLI Example: .. code-block:: bash salt-cloud -a suspend vmname
def process_header(self, data):
    """Read the leading metadata chunks of the file.

    :param data: reader object exposing ``read_chunk(fmt)``.
    :return: dict with keys ``datacolumns``, ``firstyear``, ``lastyear``
        and ``annualsteps``.
    :raises InvalidTemporalResError: when the file is not annual
        (``annualsteps != 1``).
    """
    field_names = ("datacolumns", "firstyear", "lastyear", "annualsteps")
    # Chunks are stored in this fixed order as unsigned ints.
    metadata = {field: data.read_chunk("I") for field in field_names}
    if metadata["annualsteps"] != 1:
        raise InvalidTemporalResError(
            "{}: Only annual files can currently be processed".format(self.filepath)
        )
    return metadata
Reads the first part of the file to get some essential metadata. Returns: (dict): the metadata in the header
def binary_to_int(binary_list, lower_bound=0, upper_bound=None):
    """Return the base-10 integer corresponding to a list of bits.

    An empty sequence maps to ``lower_bound``.  When the decoded value
    would exceed ``upper_bound`` the result "bounces back" into range,
    e.g. with upper_bound=10: raw 11 -> 10, raw 12 -> 9.

    Args:
        binary_list: sequence of 0s and 1s (any empty sequence is
            accepted, not just ``[]`` — the original equality check
            crashed on an empty tuple).
        lower_bound: minimum output value, inclusive.
        upper_bound: maximum output value, inclusive, or None.

    Returns:
        int: integer value of the binary input.
    """
    # Truthiness check generalizes the old `binary_list == []`, which
    # fell through to int('', 2) (ValueError) for empty tuples.
    if not binary_list:
        return lower_bound
    integer = int(''.join(str(bit) for bit in binary_list), 2)
    if upper_bound is not None and integer + lower_bound > upper_bound:
        # Reflect overflowing values back into [lower_bound, upper_bound].
        return upper_bound - (integer % (upper_bound - lower_bound + 1))
    return integer + lower_bound
Return the base 10 integer corresponding to a binary list. The maximum value is determined by the number of bits in binary_list and by upper_bound, whichever is more restrictive. Args: binary_list: list<int>; List of 0s and 1s. lower_bound: Minimum value for output, inclusive. A binary list of 0s will have this value. upper_bound: Maximum value for output, inclusive. If greater than this bound, we "bounce back". Ex. w/ upper_bound = 2: [0, 1, 2, 2, 1, 0] Ex. raw_integer = 11, upper_bound = 10, return = 10 raw_integer = 12, upper_bound = 10, return = 9 Returns: int; Integer value of the binary input.
def quote(code):
    """Return *code* wrapped as a ``u"..."`` literal when safe to do so.

    Non-string inputs (anything without ``rstrip``) pass through
    unchanged, as does code that already looks quoted or contains a
    double quote anywhere.
    """
    try:
        code = code.rstrip()
    except AttributeError:
        # Not string-like; return it untouched.
        return code
    if not code:
        return code
    edge_pair = code[0] + code[-1]
    looks_quoted = edge_pair in ('""', "''", "u'", '"')
    if not looks_quoted and '"' not in code:
        return 'u"' + code + '"'
    return code
Returns quoted code if not already quoted and if possible Parameters ---------- code: String \tCode that is quoted
def connect(self, peer_address):
    """Client-side UDP connection establishment.

    Connects the underlying socket, marks the write BIO as connected to
    the resolved peer, and performs a handshake if
    do_handshake_on_connect was set during initialization.

    Arguments:
    peer_address - address tuple of server peer
    """
    self._sock.connect(peer_address)
    # Use the resolved address the socket actually connected to.
    peer_address = self._sock.getpeername()
    BIO_dgram_set_connected(self._wbio.value, peer_address)
    # This code path assumes a single shared read/write BIO.
    assert self._wbio is self._rbio
    if self._do_handshake_on_connect:
        self.do_handshake()
Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer
def coerce_to_synchronous(func):
    """Given a function that might be async, wrap it so it can be run in
    a synchronous context.

    Non-coroutine functions are returned unchanged.  Coroutine functions
    are wrapped so each call runs the coroutine to completion on a fresh
    event loop and returns its result.

    Fixes two defects in the previous version: the coroutine's return
    value was discarded, and the process-global event loop obtained via
    ``get_event_loop()`` was closed, breaking every subsequent call.
    """
    if inspect.iscoroutinefunction(func):
        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            # Fresh loop per call: never close a loop the caller owns.
            loop = asyncio.new_event_loop()
            try:
                # Propagate the coroutine's return value to the caller.
                return loop.run_until_complete(func(*args, **kwargs))
            finally:
                loop.close()
        return sync_wrapper
    return func
Given a function that might be async, wrap it in an explicit loop so it can be run in a synchronous context.
def git_exec(self, command, **kwargs):
    """Execute a git command.

    *command* is a list of arguments (without the leading git binary,
    which is prepended in place).  Echoes the command unless
    ``no_verbose`` is passed; in fake mode nothing is executed and an
    empty result is returned instead.
    """
    from .cli import verbose_echo
    # NOTE: mutates the caller's list by prepending the git binary.
    command.insert(0, self.git)
    if kwargs.pop('no_verbose', False):
        verbose = False
    else:
        verbose = self.verbose
    verbose_echo(' '.join(command), verbose, self.fake)
    if not self.fake:
        result = self.repo.git.execute(command, **kwargs)
    else:
        # Mirror GitPython's extended-output shape when requested.
        if 'with_extended_output' in kwargs:
            result = (0, '', '')
        else:
            result = ''
    return result
Execute git commands
def vm_config(name, main, provider, profile, overrides):
    """Create vm config by layering main < provider < profile < overrides.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    """
    vm = main.copy()
    # Deep-merge provider then profile over the main config.
    vm = salt.utils.dictupdate.update(vm, provider)
    vm = salt.utils.dictupdate.update(vm, profile)
    # Overrides are applied last, as a shallow update.
    vm.update(overrides)
    vm['name'] = name
    return vm
Create vm config. :param str name: The name of the vm :param dict main: The main cloud config :param dict provider: The provider config :param dict profile: The profile config :param dict overrides: The vm's config overrides
def side_by_side(left, right):
    r"""Put two text boxes next to each other, line by line.

    Assumes all lines within each box share one width; the shorter box
    is padded with blank lines of matching width.

    Example:
        >>> print(side_by_side('A \nC ', 'B\nD'))
        A B
        C D
        <BLANKLINE>
    """
    rows_l = left.split('\n')
    rows_r = right.split('\n')
    gap = abs(len(rows_l) - len(rows_r))
    if len(rows_l) > len(rows_r):
        rows_r.extend([' ' * len(rows_r[0])] * gap)
    elif len(rows_r) > len(rows_l):
        rows_l.extend([' ' * len(rows_l[0])] * gap)
    joined = (a + b for a, b in zip(rows_l, rows_r))
    return '\n'.join(joined) + '\n'
r"""Put two boxes next to each other. Assumes that all lines in the boxes are the same width. Example: >>> left = 'A \nC ' >>> right = 'B\nD' >>> print(side_by_side(left, right)) A B C D <BLANKLINE>
def setText(self, text):
    """Set the field's value, deleting any previously entered text first.

    No-op when the field already holds *text*.
    """
    if self.text() == text:
        return
    self.touch()
    # Enough delete presses to clear the field from either side of the
    # cursor, wherever it landed after the touch.
    maxSize = len(self.text()) + 1
    self.device.press('KEYCODE_DEL', adbclient.DOWN_AND_UP, repeat=maxSize)
    self.device.press('KEYCODE_FORWARD_DEL', adbclient.DOWN_AND_UP, repeat=maxSize)
    self.type(text, alreadyTouched=True)
This function makes sure that any previously entered text is deleted before setting the value of the field.
def _is_path(instance, attribute, s, exists=True):
    """Validator for path-yness.

    Falsy values are accepted silently.  With exists=True the path must
    exist on disk, otherwise OSError is raised.

    NOTE(review): with exists=False every non-empty value raises
    TypeError, which looks unintentional — confirm intent.
    """
    if not s:
        return
    if exists:
        if os.path.exists(s):
            return
        else:
            raise OSError("path does not exist")
    else:
        raise TypeError("Not a path?")
Validator for path-yness
def unwrap(self, value, session=None):
    """Construct a python dict from a list of ``{'k': ..., 'v': ...}``
    dictionaries.

    Validates the input, then unwraps each key and value with the
    field's configured key/value types.
    """
    self.validate_unwrap(value)
    ret = {}
    for value_dict in value:
        k = value_dict['k']
        v = value_dict['v']
        ret[self.key_type.unwrap(k, session=session)] = self.value_type.unwrap(v, session=session)
    return ret
Expects a list of dictionaries with ``k`` and ``v`` set to the keys and values that the output python dictionary should have. Validates the input and then constructs the dictionary from the list.
def convert(source, to, format=None, extra_args=(), encoding='utf-8'):
    """Convert given `source` (a file path or a string) from `format`
    `to` another format.

    When `format` is None it is inferred from `source`.  Raises OSError
    if pandoc is not installed and available on the path.
    """
    return _convert(
        _read_file, _process_file, source, to,
        format, extra_args, encoding=encoding)
Convert given `source` from `format` `to` another. `source` may be either a file path or a string to be converted. It's possible to pass `extra_args` if needed. In case `format` is not provided, it will try to infer the format based on given `source`. Raises OSError if pandoc is not found! Make sure it has been installed and is available at path.
def add_new_entry(self, entry):
    """Add a completely new entry to this El Torito Section Header.

    Parameters:
     entry - The new EltoritoEntry object to add to the list of entries.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized')
    # Keep the on-disk entry count in sync with the entry list.
    self.num_section_entries += 1
    self.section_entries.append(entry)
A method to add a completely new entry to the list of entries of this header. Parameters: entry - The new EltoritoEntry object to add to the list of entries. Returns: Nothing.
def error(self, msg, n):
    """Raise a SyntaxError with lineno and col_offset taken from node *n*."""
    raise SyntaxError(msg, n.lineno, n.col_offset,
                      filename=self.compile_info.filename)
Raise a SyntaxError with the lineno and col_offset set to n's.
def fetch(self, recursive=1, exclude_children=False, exclude_back_refs=False):
    """Fetch resource from the API server and update this object in place.

    :param recursive: level of recursion for fetching resources
        (NOTE(review): accepted but unused in this body — confirm)
    :type recursive: int
    :param exclude_children: don't get children references
    :type exclude_children: bool
    :param exclude_back_refs: don't get back_refs references
    :type exclude_back_refs: bool
    :rtype: Resource
    """
    if not self.path.is_resource and not self.path.is_uuid:
        # Resolve fq_name/uuid first so self.href points somewhere valid.
        self.check()
    params = {}
    if exclude_children:
        params['exclude_children'] = True
    if exclude_back_refs:
        params['exclude_back_refs'] = True
    data = self.session.get_json(self.href, **params)[self.type]
    self.from_dict(data)
    return self
Fetch resource from the API server :param recursive: level of recursion for fetching resources :type recursive: int :param exclude_children: don't get children references :type exclude_children: bool :param exclude_back_refs: don't get back_refs references :type exclude_back_refs: bool :rtype: Resource
def get(self, request):
    """Get a request handler based on the URL of the request.

    When virtual hosts are configured, a host-specific lookup is tried
    first, falling back to the host-less route on NotFound.

    :param request: Request object
    :return: handler, arguments, keyword arguments
    """
    if not self.hosts:
        return self._get(request.path, request.method, "")
    requested_host = request.headers.get("Host", "")
    try:
        return self._get(request.path, request.method, requested_host)
    except NotFound:
        # No vhost-specific route matched; retry against the default.
        return self._get(request.path, request.method, "")
Get a request handler based on the URL of the request, or raises an error :param request: Request object :return: handler, arguments, keyword arguments
def service_start(name):
    """Start a "service" on the REST server; return its response dict."""
    r = salt.utils.http.query(DETAILS['url']+'service/start/'+name,
                              decode_type='json', decode=True)
    return r['dict']
Start a "service" on the REST server
def put_json(self, url, data, cls=None, **kwargs):
    """PUT *data* as JSON to the api-server and return the decoded reply.

    :param url: resource location (eg: "/type/uuid")
    :type url: str
    :param cls: JSONEncoder class used to serialize *data*
    :type cls: JSONEncoder
    """
    kwargs['data'] = to_json(data, cls=cls)
    kwargs['headers'] = self.default_headers
    return self.put(url, **kwargs).json()
PUT data to the api-server :param url: resource location (eg: "/type/uuid") :type url: str :param cls: JSONEncoder class :type cls: JSONEncoder
def parse_request(self):
    """Parse the request, then add simple routing on top of
    `BaseHTTPRequestHandler`.

    GET /uri1/part2 invokes `do_GET_uri1()` when it exists; otherwise
    the stock `do_GET` behaviour is kept.
    """
    ret = BaseHTTPRequestHandler.parse_request(self)
    if ret:
        # First path segment, e.g. '/uri1/part2' -> 'uri1'.
        mname = self.path.lstrip('/').split('/')[0]
        mname = self.command + ('_' + mname if mname else '')
        # Only reroute when the specialised handler actually exists.
        if hasattr(self, 'do_' + mname):
            self.command = mname
    return ret
Override parse_request method to enrich basic functionality of `BaseHTTPRequestHandler` class Original class can only invoke do_GET, do_POST, do_PUT, etc method implementations if they are defined. But we would like to have at least some simple routing mechanism, i.e.: GET /uri1/part2 request should invoke `do_GET_uri1()` POST /other should invoke `do_POST_other()` If the `do_<REQUEST_METHOD>_<first_part_url>` method does not exist we'll fall back to original behavior.
def GetFlagSuggestions(attempt, longopt_list):
    """Get helpful similar matches for an invalid flag.

    Returns the option names whose prefix is closest to *attempt* by
    Damerau-Levenshtein distance, or [] when the attempt is too short
    or no option is close enough.
    """
    if len(attempt) <= 2 or not longopt_list:
        return []
    option_names = [v.split('=')[0] for v in longopt_list]
    # Compare against each option's prefix of the same length.
    distances = [(_DamerauLevenshtein(attempt, option[0:len(attempt)]), option)
                 for option in option_names]
    distances.sort(key=lambda t: t[0])
    least_errors, _ = distances[0]
    # Suggest nothing when even the closest match is too far off.
    if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
        return []
    suggestions = []
    for errors, name in distances:
        if errors == least_errors:
            suggestions.append(name)
        else:
            break
    return suggestions
Get helpful similar matches for an invalid flag.
def get_object(self, base_: Type, qualifier: str = None) -> Any:
    """Get an instance directly from the container.

    If the qualifier is not None, the proper method to create/retrieve
    the instance is used.

    :param base_: `base` of this object
    :param qualifier: optional qualifier
    :return: object instance
    :raises UnknownDependency: when no egg matches
    :raises UnknownScope: when the egg's scope id is not registered
    """
    egg_ = self._find_egg(base_, qualifier)
    if egg_ is None:
        raise UnknownDependency('Unknown dependency %s' % base_)
    scope_id = getattr(egg_.egg, '__haps_custom_scope', INSTANCE_SCOPE)
    try:
        _scope = self.scopes[scope_id]
    except KeyError:
        raise UnknownScope('Unknown scopes with id %s' % scope_id)
    else:
        # Scope access is serialized under the container lock.
        with self._lock:
            return _scope.get_object(egg_.egg)
Get instance directly from the container. If the qualifier is not None, proper method to create/retrieve instance is used. :param base_: `base` of this object :param qualifier: optional qualifier :return: object instance
def lookup(source, keys, fallback=None):
    """Traverse *source* by successively indexing each key in *keys*.

    Returns *fallback* (None by default) when any step fails with a
    KeyError, AttributeError or TypeError, instead of raising.
    """
    node = source
    try:
        for step in keys:
            node = node[step]
    except (KeyError, AttributeError, TypeError):
        return fallback
    return node
Traverses the source, looking up each key. Returns `fallback` (None by default) if it can't find anything, instead of raising an exception.
def dump_hex(ofd, start, len_, prefix=0):
    """Convert `start` to hex and log it, 16 bytes per log statement.

    Positional arguments:
    ofd -- function to call with arguments similar to `logging.debug`.
    start -- bytearray() or bytearray_ptr() instance.
    len_ -- number of bytes of `start` to dump (integer).

    Keyword arguments:
    prefix -- additional number of whitespace pairs to prefix each log
        statement with.
    """
    prefix_whitespaces = ' ' * prefix
    # Fewer bytes per row when prefixed, keeping the total width steady.
    limit = 16 - (prefix * 2)
    start_ = start[:len_]
    for line in (start_[i:i + limit] for i in range(0, len(start_), limit)):
        hex_lines, ascii_lines = list(), list()
        for c in line:
            # Items may be ints (py3 bytes) or 1-char strings (py2).
            hex_lines.append('{0:02x}'.format(c if hasattr(c, 'real') else ord(c)))
            c2 = chr(c) if hasattr(c, 'real') else c
            # Non-printable characters show as '.'.
            ascii_lines.append(c2 if c2 in string.printable[:95] else '.')
        hex_line = ' '.join(hex_lines).ljust(limit * 3)
        ascii_line = ''.join(ascii_lines)
        ofd(' %s%s%s', prefix_whitespaces, hex_line, ascii_line)
Convert `start` to hex and logs it, 16 bytes per log statement. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L760 Positional arguments: ofd -- function to call with arguments similar to `logging.debug`. start -- bytearray() or bytearray_ptr() instance. len_ -- size of `start` (integer). Keyword arguments: prefix -- additional number of whitespace pairs to prefix each log statement with.
def launch_app(app_path, params=None, time_before_kill_app=15):
    """Start an app and wait for it to exit.

    Args:
        app_path: path to the executable.
        params: optional list of command-line arguments (the old
            mutable-default ``[]`` is replaced by None).
        time_before_kill_app: timeout in seconds before giving up.

    Returns:
        True if the process exited with status 0, False otherwise
        (including launch errors and timeouts).
    """
    import subprocess
    # Build a proper argv list.  The old call passed a *nested* list
    # with shell=True, which on POSIX silently dropped the params.
    cmd = [app_path] + list(params or [])
    try:
        res = subprocess.call(cmd, timeout=time_before_kill_app)
        print('res = ', res)
        return res == 0
    except Exception as ex:
        print('error launching app ' + str(app_path) + ' with params ' + str(params) + '\n' + str(ex))
        return False
start an app
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in
    support.

    :arg str treeType: "dom", "etree", "lxml" or "genshi"
        (case-insensitive).
    :arg implementation: module implementing the tree type (applies to
        "etree" only).
    :arg kwargs: keyword arguments passed to the etree walker; no
        effect for the others.
    :returns: a TreeWalker class
    """
    treeType = treeType.lower()
    if treeType not in treeWalkerCache:
        if treeType == "dom":
            from . import dom
            treeWalkerCache[treeType] = dom.TreeWalker
        elif treeType == "genshi":
            from . import genshi
            treeWalkerCache[treeType] = genshi.TreeWalker
        elif treeType == "lxml":
            from . import etree_lxml
            treeWalkerCache[treeType] = etree_lxml.TreeWalker
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # Returned directly without caching — presumably because the
            # result depends on `implementation` and kwargs.
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    return treeWalkerCache.get(treeType)
Get a TreeWalker class for various types of tree with built-in support :arg str treeType: the name of the tree type required (case-insensitive). Supported values are: * "dom": The xml.dom.minidom DOM implementation * "etree": A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). * "lxml": Optimized walker for lxml.etree * "genshi": a Genshi stream :arg implementation: A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree (Currently applies to the "etree" tree type only). :arg kwargs: keyword arguments passed to the etree walker--for other walkers, this has no effect :returns: a TreeWalker class
def event(self, event_name):
    """Register one occurrence of *event_name*.

    Events with the same name are aggregated into a single counter;
    progress is logged when the logging interval has elapsed.
    """
    counters = self._event_dict
    counters[event_name] = counters.get(event_name, 0) + 1
    self._log_progress_if_interval_elapsed()
Register an event that occurred during processing of a task of the given type. Args: event_name: str A name for a type of events. Events of the same type are displayed as a single entry and a total count of occurrences.
def instantiate_child(self, nurest_object, from_template, response_choice=None, async=False, callback=None, commit=True):
    """Instantiate an nurest_object from a template object and POST it.

    NOTE(review): `async` became a reserved keyword in Python 3.7, so
    this signature only parses on older interpreters; renaming it would
    break existing callers.

    Args:
        nurest_object: the NURESTObject object to add
        from_template: the NURESTObject template object (must have an id)
        callback: callback containing the object and the connection

    Returns:
        (object, connection)
    """
    if not from_template.id:
        raise InternalConsitencyError("Cannot instantiate a child from a template with no ID: %s." % from_template)
    nurest_object.template_id = from_template.id
    return self._manage_child_object(nurest_object=nurest_object,
                                     async=async,
                                     method=HTTP_METHOD_POST,
                                     callback=callback,
                                     handler=self._did_create_child,
                                     response_choice=response_choice,
                                     commit=commit)
Instantiate an nurest_object from a template object Args: nurest_object: the NURESTObject object to add from_template: the NURESTObject template object callback: callback containing the object and the connection Returns: Returns the object and connection (object, connection) Example: >>> parent_entity = NUParentEntity(id="xxxx-xxxx-xxx-xxxx") # create a NUParentEntity with an existing ID (or retrieve one) >>> other_entity_template = NUOtherEntityTemplate(id="yyyy-yyyy-yyyy-yyyy") # create a NUOtherEntityTemplate with an existing ID (or retrieve one) >>> other_entity_instance = NUOtherEntityInstance(name="my new instance") # create a new NUOtherEntityInstance to be instantiated from other_entity_template >>> >>> parent_entity.instantiate_child(other_entity_instance, other_entity_template) # instantiate the new domain in the server
def _get_mouse_cursor(self):
    """Determine the mouse cursor for this sprite.

    An explicit self.mouse_cursor wins; otherwise FLEUR for draggable
    interactive sprites and HAND2 for merely interactive ones.  Returns
    None otherwise — presumably the caller then falls back to the
    scene's cursor (confirm).
    """
    if self.mouse_cursor is not None:
        return self.mouse_cursor
    elif self.interactive and self.draggable:
        return gdk.CursorType.FLEUR
    elif self.interactive:
        return gdk.CursorType.HAND2
Determine mouse cursor. By default look for self.mouse_cursor is defined and take that. Otherwise use gdk.CursorType.FLEUR for draggable sprites and gdk.CursorType.HAND2 for interactive sprites. Defaults to scenes cursor.
def tar_archive(context):
    """Archive each path in context['tar']['archive'] to a tar file.

    Each item is a dict with 'in' (source path) and 'out' (destination
    archive path); both values support context string formatting.
    """
    logger.debug("start")
    # Write mode (compression) is derived from the context settings.
    mode = get_file_mode_for_writing(context)
    for item in context['tar']['archive']:
        destination = context.get_formatted_string(item['out'])
        source = context.get_formatted_string(item['in'])
        with tarfile.open(destination, mode) as archive_me:
            logger.debug(f"Archiving '{source}' to '{destination}'")
            # arcname='.' stores contents relative to the archive root.
            archive_me.add(source, arcname='.')
            logger.info(f"Archived '{source}' to '{destination}'")
    logger.debug("end")
Archive specified path to a tar archive. Args: context: dictionary-like. context is mandatory. context['tar']['archive'] must exist. It's a dictionary. keys are the paths to archive. values are the destination output paths. Example: tar: archive: - in: path/to/dir out: path/to/destination.tar.xs - in: another/my.file out: ./my.tar.xs This will archive directory path/to/dir to path/to/destination.tar.xs, and also archive file another/my.file to ./my.tar.xs
def XML(content, source=None):
    """Parse XML text with ET.XML, handling ParseError user-friendly.

    `source` is forwarded to the error handler for context.
    """
    try:
        tree = ET.XML(content)
    except ET.ParseError as err:
        # x_parse_error presumably raises; otherwise `tree` below would
        # be unbound — confirm.
        x_parse_error(err, content, source)
    return tree
Parses the XML text using the ET.XML function, but handling the ParseError in a user-friendly way.
def remove_xml_element(name, tree):
    """Remove every element called *name* (in the Salesforce metadata
    namespace) from the ElementTree content *tree*, returning the tree."""
    query = ".//{{http://soap.sforce.com/2006/04/metadata}}{}".format(name)
    doomed = tree.findall(query)
    if not doomed:
        return tree
    # ElementTree has no parent pointers; build a child -> parent map.
    child_to_parent = {child: parent for parent in tree.iter() for child in parent}
    for node in doomed:
        child_to_parent[node].remove(node)
    return tree
Removes XML elements from an ElementTree content tree
def list_devices(self, **kwargs):
    """List devices in the device catalog.

    :param int limit: The number of devices to retrieve.
    :param str order: Ordering direction, ascending (asc) or descending
        (desc).
    :param str after: Get devices after/starting at given `device_id`.
    :param filters: Dictionary of filters to apply.
    :returns: list of :py:class:`Device` objects in the catalog
    :rtype: PaginatedResponse
    """
    kwargs = self._verify_sort_options(kwargs)
    kwargs = self._verify_filters(kwargs, Device, True)
    api = self._get_api(device_directory.DefaultApi)
    return PaginatedResponse(api.device_list, lwrap_type=Device, **kwargs)
List devices in the device catalog. Example usage, listing all registered devices in the catalog: .. code-block:: python filters = { 'state': {'$eq': 'registered' } } devices = api.list_devices(order='asc', filters=filters) for idx, d in enumerate(devices): print(idx, d.id) :param int limit: The number of devices to retrieve. :param str order: The ordering direction, ascending (asc) or descending (desc) :param str after: Get devices after/starting at given `device_id` :param filters: Dictionary of filters to apply. :returns: a list of :py:class:`Device` objects registered in the catalog. :rtype: PaginatedResponse
def append(self, item):
    """Append a `Monomer` to the `Polymer`.

    Notes
    -----
    Does not update labelling.

    Raises
    ------
    TypeError
        If *item* is not a Monomer.
    """
    if not isinstance(item, Monomer):
        # Grammar fix in the user-facing message ("a Polymer").
        raise TypeError(
            'Only Monomer objects can be appended to a Polymer.')
    self._monomers.append(item)
Appends a `Monomer to the `Polymer`. Notes ----- Does not update labelling.
def convertDay(self, day, prefix="", weekday=False):
    """Convert a datetime object representing a day into a human-ready
    string ("today", "tomorrow", or e.g. "Monday, October 1").

    Args:
        day (datetime.date): day to convert into text.
        prefix (str): optional prefix for non-relative results (e.g.
            "in"); not applied to "today"/"tomorrow".
        weekday (bool): include the weekday name when True.

    Returns:
        A string representation of the input day, ignoring any
        time-related information.
    """
    def sameDay(d1, d2):
        # Compare calendar dates field by field.
        d = d1.day == d2.day
        m = d1.month == d2.month
        y = d1.year == d2.year
        return d and m and y
    tom = self.now + datetime.timedelta(days=1)
    if sameDay(day, self.now):
        return "today"
    elif sameDay(day, tom):
        return "tomorrow"
    if weekday:
        dayString = day.strftime("%A, %B %d")
    else:
        dayString = day.strftime("%B %d")
    # Strip the zero-padded day's leading zero ("May 05" -> "May 5").
    if not int(dayString[-2]):
        dayString = dayString[:-2] + dayString[-1]
    return prefix + " " + dayString
Convert a datetime object representing a day into a human-ready string that can be read, spoken aloud, etc. Args: day (datetime.date): A datetime object to be converted into text. prefix (str): An optional argument that prefixes the converted string. For example, if prefix="in", you'd receive "in two days", rather than "two days", while the method would still return "tomorrow" (rather than "in tomorrow"). weekday (bool): An optional argument that returns "Monday, Oct. 1" if True, rather than "Oct. 1". Returns: A string representation of the input day, ignoring any time-related information.
def delete(name, wait=False, region=None, key=None, keyid=None, profile=None):
    """Delete a cache cluster.

    CLI example::

        salt myminion boto_elasticache.delete myelasticache

    With wait=True, polls every 2s until the cluster config is gone or
    its status reaches 'deleting'.  Returns True on success, False on a
    boto server error.
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.delete_cache_cluster(name)
        if not wait:
            log.info('Deleted cache cluster %s.', name)
            return True
        while True:
            config = get_config(name, region, key, keyid, profile)
            if not config:
                return True
            # NOTE(review): returns while the cluster is still deleting;
            # the two statements after this loop are unreachable.
            if config['cache_cluster_status'] == 'deleting':
                return True
            time.sleep(2)
        log.info('Deleted cache cluster %s.', name)
        return True
    except boto.exception.BotoServerError as e:
        msg = 'Failed to delete cache cluster {0}.'.format(name)
        log.error(msg)
        log.debug(e)
        return False
Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache
def peak_interval(self, name, alpha=_alpha, npoints=_npoints, **kwargs):
    """Calculate the peak interval for parameter *name*.

    Delegates to the global ``peak_interval`` function (which this
    method shadows inside the class) using samples from ``self.get``.
    """
    data = self.get(name, **kwargs)
    return peak_interval(data,alpha,npoints)
Calculate peak interval for parameter.
def global_symbols_in_children(self):
    """Union of every child's referenced-but-undeclared symbols,
    including those of the children's own subtrees.

    The intended use case is to ban these symbols from being used as
    remapped symbol values.
    """
    collected = set()
    for child in self.children:
        collected.update(child.global_symbols)
        collected.update(child.global_symbols_in_children)
    return collected
This is based on all children referenced symbols that have not been declared. The intended use case is to ban the symbols from being used as remapped symbol values.
def K_to_F(self, K, method='doubling'):
    """Compute agent 1's best value-maximizing response F, given K.

    Parameters
    ----------
    K : array_like(float, ndim=2)
        A j x n array
    method : str, optional(default='doubling')
        Riccati solution method, in {'doubling', 'qz'}.

    Returns
    -------
    F : array_like(float, ndim=2)
        The policy function for the given K
    P : array_like(float, ndim=2)
        The value function for the given K
    """
    # Distorted law of motion and adjusted payoff for the robust problem.
    A1 = self.A + dot(self.C, K)
    B1 = self.B
    Q1 = self.Q
    R1 = self.R - self.beta * self.theta * dot(K.T, K)
    lq = LQ(Q1, R1, A1, B1, beta=self.beta)
    P, F, d = lq.stationary_values(method=method)
    return F, P
Compute agent 1's best value-maximizing response F, given K. Parameters ---------- K : array_like(float, ndim=2) A j x n array method : str, optional(default='doubling') Solution method used in solving the associated Riccati equation, str in {'doubling', 'qz'}. Returns ------- F : array_like(float, ndim=2) The policy function for a given K P : array_like(float, ndim=2) The value function for a given K
def _pagination(self):
    """Compute the neighboring pages from this view.

    Returns a tuple of (older page, newer page); (None, None) when the
    spec contains neither 'date' nor 'count'.
    """
    oldest = self.oldest
    newest = self.newest
    # Strip offset-style keys so neighbor specs start from a clean base.
    base = {key: val for key, val in self.spec.items()
            if key not in OFFSET_PRIORITY}
    oldest_neighbor = View({
        **base, 'before': oldest, 'order': 'newest'
    }).first if oldest else None
    newest_neighbor = View({
        **base, 'after': newest, 'order': 'oldest'
    }).first if newest else None
    if 'date' in self.spec:
        return self._get_date_pagination(base, oldest_neighbor, newest_neighbor)
    if 'count' in self.spec:
        return self._get_count_pagination(base, oldest_neighbor, newest_neighbor)
    return None, None
Compute the neighboring pages from this view. Returns a tuple of older page, newer page.
def string(_object):
    """Validate that a given input is of type string.

    If `_object` is callable, acts as a decorator: the returned wrapper
    asserts its argument is a string before calling the wrapped
    validator.  Otherwise `_object` itself is validated.

    NOTE(review): relies on py2 `basestring`; will NameError on py3
    unless an alias is defined elsewhere in the module.
    """
    if is_callable(_object):
        _validator = _object
        @wraps(_validator)
        def decorated(value):
            ensure(isinstance(value, basestring), "not of type string")
            return _validator(value)
        return decorated
    ensure(isinstance(_object, basestring), "not of type string")
Validates a given input is of type string. Example usage:: data = {'a' : 21} schema = (string, 21) You can also use this as a decorator, as a way to check for the input before it even hits a validator you may be writing. .. note:: If the argument is a callable, the decorating behavior will be triggered, otherwise it will act as a normal function.
def decode_path(file_path):
    """Turn a path name into unicode text.

    ``None`` passes through unchanged; byte strings are decoded with
    the filesystem encoding; text is returned as-is.
    """
    if file_path is None:
        return None
    if isinstance(file_path, bytes):
        # `bytes` replaces the old six.binary_type alias — identical on
        # Python 3 and drops the third-party `six` dependency.
        file_path = file_path.decode(sys.getfilesystemencoding())
    return file_path
Turn a path name into unicode.
def expand_indent(line):
    r"""Return the amount of leading indentation, with tabs expanded to
    the next multiple of 8.

    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('   \t')
    8
    >>> expand_indent('        \t')
    16
    """
    if '\t' not in line:
        # Fast path: spaces only, indentation equals stripped length.
        return len(line) - len(line.lstrip())
    width = 0
    for ch in line:
        if ch == '\t':
            width = width // 8 * 8 + 8
        elif ch == ' ':
            width += 1
        else:
            break
    return width
r"""Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\t') 8 >>> expand_indent(' \t') 8 >>> expand_indent(' \t') 16
def get_jump_target_maps(code, opc):
    """Map each reachable offset to the list of instruction offsets that
    can run immediately before it.

    Covers fall-through predecessors as well as relative/absolute jump
    sources, so the keys are reachable instructions; useful for
    control-flow analysis.
    """
    offset2prev = {}
    prev_offset = -1
    for offset, op, arg in unpack_opargs_bytecode(code, opc):
        if prev_offset >= 0:
            prev_list = offset2prev.get(offset, [])
            prev_list.append(prev_offset)
            offset2prev[offset] = prev_list
        # Instructions that never fall through break the predecessor chain.
        if op in opc.NOFOLLOW:
            prev_offset = -1
        else:
            prev_offset = offset
        if arg is not None:
            jump_offset = -1
            if op in opc.JREL_OPS:
                # Relative jumps are measured from the following instruction.
                op_len = op_size(op, opc)
                jump_offset = offset + op_len + arg
            elif op in opc.JABS_OPS:
                jump_offset = arg
            if jump_offset >= 0:
                prev_list = offset2prev.get(jump_offset, [])
                prev_list.append(offset)
                offset2prev[jump_offset] = prev_list
    return offset2prev
Returns a dictionary where the key is an offset and the values are a list of instruction offsets which can get run before that instruction. This includes jump instructions as well as non-jump instructions. Therefore, the keys of the dictionary are reachable instructions. The values of the dictionary may be useful in control-flow analysis.
def match(self, subject: Union[Expression, FlatTerm]) -> Iterator[Tuple[T, Substitution]]:
    """Match the given subject against all patterns in the net.

    Args:
        subject: The subject that is matched.  Must be constant.

    Yields:
        ``(final label, substitution)`` tuples for each pattern whose
        substitution extraction succeeds and whose constraints all hold.
    """
    for index in self._match(subject):
        pattern, label = self._patterns[index]
        subst = Substitution()
        if subst.extract_substitution(subject, pattern.expression):
            for constraint in pattern.constraints:
                if not constraint(subst):
                    break
            else:
                # All constraints satisfied.
                yield label, subst
Match the given subject against all patterns in the net. Args: subject: The subject that is matched. Must be constant. Yields: A tuple :code:`(final label, substitution)`, where the first component is the final label associated with the pattern as given when using :meth:`add()` and the second one is the match substitution.
def loads(cls, data):
    """Create a feature collection from a CBOR byte string.

    Expects a 2-item CBOR list ``[metadata, contents]`` where metadata
    carries a version tag ``'v' == 'fc01'`` and an optional read-only
    flag ``'ro'``.
    """
    rep = cbor.loads(data)
    if not isinstance(rep, Sequence):
        raise SerializationError('expected a CBOR list')
    if len(rep) != 2:
        raise SerializationError('expected a CBOR list of 2 items')
    metadata = rep[0]
    if 'v' not in metadata:
        raise SerializationError('no version in CBOR metadata')
    if metadata['v'] != 'fc01':
        raise SerializationError('invalid CBOR version {!r} '
                                 '(expected "fc01")'
                                 .format(metadata['v']))
    read_only = metadata.get('ro', False)
    contents = rep[1]
    return cls.from_dict(contents, read_only=read_only)
Create a feature collection from a CBOR byte string.
def _process_scrape_info(self, scraper: BaseScraper,
                         scrape_result: ScrapeResult,
                         item_session: ItemSession):
    """Collect child URLs from the scrape result into the item session.

    Returns:
        (num_inline, num_linked): counts of accepted inline and linked
        URLs.
    """
    if not scrape_result:
        return 0, 0
    num_inline = 0
    num_linked = 0
    for link_context in scrape_result.link_contexts:
        url_info = self.parse_url(link_context.link)
        if not url_info:
            continue
        url_info = self.rewrite_url(url_info)
        child_url_record = item_session.child_url_record(
            url_info.url, inline=link_context.inline
        )
        # Skip links the fetch rule's filters reject.
        if not self._fetch_rule.consult_filters(
                item_session.request.url_info, child_url_record)[0]:
            continue
        if link_context.inline:
            num_inline += 1
        else:
            num_linked += 1
        item_session.add_child_url(url_info.url, inline=link_context.inline,
                                   link_type=link_context.link_type)
    return num_inline, num_linked
Collect the URLs from the scrape info dict.
def status_unpin(self, id):
    """Unpin a pinned status for the logged-in user.

    Returns a `toot dict`_ with the status that used to be pinned.
    """
    # Accept either a bare id or a status dict.
    id = self.__unpack_id(id)
    url = '/api/v1/statuses/{0}/unpin'.format(str(id))
    return self.__api_request('POST', url)
Unpin a pinned status for the logged-in user. Returns a `toot dict`_ with the status that used to be pinned.
def schema(self):
    """Return the schema instance if this is a reference to another
    table; the result is cached in ``self._schema``."""
    if not hasattr(self, "_schema"):
        ret = None
        o = self._type
        if isinstance(o, type):
            # A class: use its schema attribute if present.
            ret = getattr(o, "schema", None)
        elif isinstance(o, Schema):
            ret = o
        else:
            # Otherwise treat it as a dotted path to resolve.
            module, klass = utils.get_objects(o)
            ret = klass.schema
        self._schema = ret
    return self._schema
return the schema instance if this is reference to another table
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
    """Helper to construct a ContextOptions object from keyword args.

    Args:
        ctx_options: A dict of keyword arguments.
        config_cls: Optional Configuration class, default ContextOptions.

    Returns:
        A Configuration object, or None if ctx_options is empty.
    """
    if not ctx_options:
        return None
    for key in list(ctx_options):
        # Rename deprecated spellings to their canonical option names.
        translation = _OPTION_TRANSLATIONS.get(key)
        if translation:
            if translation in ctx_options:
                raise ValueError('Cannot specify %s and %s at the same time' %
                                 (key, translation))
            ctx_options[translation] = ctx_options.pop(key)
    return config_cls(**ctx_options)
Helper to construct a ContextOptions object from keyword arguments. Args: ctx_options: A dict of keyword arguments. config_cls: Optional Configuration class to use, default ContextOptions. Note that either 'options' or 'config' can be used to pass another Configuration object, but not both. If another Configuration object is given it provides default values. Returns: A Configuration object, or None if ctx_options is empty.
def get_sync_start_position(self, document, lineno):
    """Scan backwards from *lineno* for a possible position to start.

    Returns a (row, column) tuple: the nearest earlier line matching
    the sync pattern, (0, 0) for small line numbers with no match, or
    (lineno, 0) otherwise.
    """
    regex = self._compiled_pattern
    lines = document.lines
    earliest = max(-1, lineno - self.MAX_BACKWARDS)
    for row in range(lineno, earliest, -1):
        found = regex.match(lines[row])
        if found:
            return row, found.start()
    # No sync point found within range.
    if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
        return 0, 0
    return lineno, 0
Scan backwards, and find a possible position to start.
def if_then(self, classical_reg, if_program, else_program=None):
    """If the classical register at `classical_reg` is 1, run
    if_program, else run else_program.

    Emits JUMP-WHEN/JUMP with fresh THEN/END label placeholders:

        JUMP-WHEN @THEN [c]; else...; JUMP @END; LABEL @THEN; if...;
        LABEL @END

    :param int classical_reg: classical register checked as condition
    :param Program if_program: program run when the register is 1
    :param Program else_program: program run when it is 0; optional,
        defaults to an empty Program.
    :returns: The Quil Program with the branching instructions added.
    :rtype: Program
    """
    else_program = else_program if else_program is not None else Program()
    label_then = LabelPlaceholder("THEN")
    label_end = LabelPlaceholder("END")
    self.inst(JumpWhen(target=label_then, condition=unpack_classical_reg(classical_reg)))
    self.inst(else_program)
    self.inst(Jump(label_end))
    self.inst(JumpTarget(label_then))
    self.inst(if_program)
    self.inst(JumpTarget(label_end))
    return self
If the classical register at index classical reg is 1, run if_program, else run else_program. Equivalent to the following construction: .. code:: IF [c]: instrA... ELSE: instrB... => JUMP-WHEN @THEN [c] instrB... JUMP @END LABEL @THEN instrA... LABEL @END :param int classical_reg: The classical register to check as the condition :param Program if_program: A Quil program to execute if classical_reg is 1 :param Program else_program: A Quil program to execute if classical_reg is 0. This argument is optional and defaults to an empty Program. :returns: The Quil Program with the branching instructions added. :rtype: Program
def _islots(self):
    """Return an iterator with the inferred slots, or None when the
    class defines no __slots__."""
    if "__slots__" not in self.locals:
        return None
    for slots in self.igetattr("__slots__"):
        # __slots__ must be iterable: require one of the ITER_METHODS.
        for meth in ITER_METHODS:
            try:
                slots.getattr(meth)
                break
            except exceptions.AttributeInferenceError:
                continue
        else:
            continue
        if isinstance(slots, node_classes.Const):
            # A non-empty string __slots__ declares a single slot.
            if slots.value:
                yield slots
            continue
        if not hasattr(slots, "itered"):
            continue
        if isinstance(slots, node_classes.Dict):
            values = [item[0] for item in slots.items]
        else:
            values = slots.itered()
        if values is util.Uninferable:
            continue
        if not values:
            # Stop when __slots__ is inferred to be empty.
            return values
        for elt in values:
            try:
                for inferred in elt.infer():
                    if inferred is util.Uninferable:
                        continue
                    # Only non-empty string constants count as slots.
                    if not isinstance(
                        inferred, node_classes.Const
                    ) or not isinstance(inferred.value, str):
                        continue
                    if not inferred.value:
                        continue
                    yield inferred
            except exceptions.InferenceError:
                continue
    return None
Return an iterator with the inferred slots.
def collect(self, force=False):
    """Return the child changes of this super-change as a tuple.

    Calls ``collect_impl`` and caches the resulting tuple in
    ``self.changes``; the cached value is reused on later calls unless it
    is empty or ``force`` is True.
    """
    cached = self.changes
    if force or not cached:
        cached = tuple(self.collect_impl())
        self.changes = cached
    return cached
calls collect_impl and stores the results as the child changes of this super-change. Returns a tuple of the data generated from collect_impl. Caches the result rather than re-computing each time, unless force is True
def insertDatastore(self, index, store):
    """Insert datastore `store` into this collection at `index`.

    Raises TypeError when `store` is not a Datastore instance.
    """
    if isinstance(store, Datastore):
        self._stores.insert(index, store)
    else:
        raise TypeError("stores must be of type %s" % Datastore)
Inserts datastore `store` into this collection at `index`.
async def wait_for_group(self, container, networkid, timeout = 120):
    """Wait for a VXLAN group to be created.

    Returns the physical port id for ``networkid`` once its group exists.
    Raises ConnectionResetException if the connection drops and ValueError
    if the group is still absent after ``timeout`` seconds.
    """
    if networkid in self._current_groups:
        return self._current_groups[networkid]
    else:
        if not self._connection.connected:
            raise ConnectionResetException
        # Wait for whichever comes first: a group-updated event for this
        # network, or a connection-down notification.
        groupchanged = VXLANGroupChanged.createMatcher(self._connection, networkid, VXLANGroupChanged.UPDATED)
        conn_down = self._connection.protocol.statematcher(self._connection)
        timeout_, ev, m = await container.wait_with_timeout(timeout, groupchanged, conn_down)
        if timeout_:
            raise ValueError('VXLAN group is still not created after a long time')
        elif m is conn_down:
            raise ConnectionResetException
        else:
            return ev.physicalportid
Wait for a VXLAN group to be created
def result(self, timeout=None):
    """Retrieve the result of the call, waiting for the server if needed.

    The first call processes packets for this transaction and caches the
    outcome; later calls return the cached result.

    :param timeout: The time to wait for a result from the server.
        Raises :exc:`RTMPTimeoutError` on timeout.
    """
    if not self.done:
        self._result = self.conn.process_packets(
            transaction_id=self.transaction_id, timeout=timeout)
        self.done = True
    return self._result
Retrieves the result of the call. :param timeout: The time to wait for a result from the server. Raises :exc:`RTMPTimeoutError` on timeout.
def get_club_members(self, club_id, limit=None):
    """Gets the member objects for specified club ID.

    http://strava.github.io/api/v3/clubs/#get-members

    :param club_id: The numeric ID for the club.
    :type club_id: int
    :param limit: Maximum number of athletes to return. (default unlimited)
    :type limit: int
    :return: An iterator of :class:`stravalib.model.Athlete` objects.
    :rtype: :class:`BatchedResultsIterator`
    """
    result_fetcher = functools.partial(self.protocol.get, '/clubs/{id}/members', id=club_id)
    return BatchedResultsIterator(entity=model.Athlete, bind_client=self, result_fetcher=result_fetcher, limit=limit)
Gets the member objects for specified club ID. http://strava.github.io/api/v3/clubs/#get-members :param club_id: The numeric ID for the club. :type club_id: int :param limit: Maximum number of athletes to return. (default unlimited) :type limit: int :return: An iterator of :class:`stravalib.model.Athlete` objects. :rtype: :class:`BatchedResultsIterator`
def _get_validate(data):
    """Retrieve items to validate, from single samples or from combined
    joint calls.

    Returns a deep copy of the sample carrying validation configuration,
    or None when validation is not configured.
    """
    if data.get("vrn_file") and tz.get_in(["config", "algorithm", "validate"], data):
        return utils.deepish_copy(data)
    elif "group_orig" in data:
        # Joint call: find an original item configured for validation and
        # attach the combined variant file to it.
        for sub in multi.get_orig_items(data):
            if "validate" in sub["config"]["algorithm"]:
                sub_val = utils.deepish_copy(sub)
                sub_val["vrn_file"] = data["vrn_file"]
                return sub_val
    return None
Retrieve items to validate, from single samples or from combined joint calls.
def is_downloaded(self, file_path):
    """Check if the data file is already downloaded.

    Returns True (and logs a note) when ``file_path`` exists on disk,
    False otherwise.
    """
    if not os.path.exists(file_path):
        return False
    self.chatbot.logger.info('File is already downloaded')
    return True
Check if the data file is already downloaded.
def isrot(m, ntol, dtol):
    """Indicate whether a 3x3 matrix is a rotation matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrot_c.html

    :param m: A matrix to be tested (3x3-element array of floats).
    :param ntol: Tolerance for the norms of the columns of m.
    :type ntol: float
    :param dtol: Tolerance for the determinant of a matrix whose columns
        are the unitized columns of m.
    :type dtol: float
    :return: True if and only if m is a rotation matrix.
    :rtype: bool
    """
    # Convert arguments to the ctypes forms expected by the CSPICE call.
    m = stypes.toDoubleMatrix(m)
    ntol = ctypes.c_double(ntol)
    dtol = ctypes.c_double(dtol)
    return bool(libspice.isrot_c(m, ntol, dtol))
Indicate whether a 3x3 matrix is a rotation matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrot_c.html :param m: A matrix to be tested. :type m: 3x3-Element Array of floats :param ntol: Tolerance for the norms of the columns of m. :type ntol: float :param dtol: Tolerance for the determinant of a matrix whose columns are the unitized columns of m. :type dtol: float :return: True if and only if m is a rotation matrix. :rtype: bool
def _get_voltage_magnitude_var(self, buses, generators):
    """Returns the voltage magnitude variable set.

    Initial values come from the buses, overridden by each generator's
    voltage set-point at its bus; bounds are the bus v_min/v_max values.
    """
    Vm = array([b.v_magnitude for b in buses])

    # Generator voltage set-points take precedence at their buses.
    for g in generators:
        Vm[g.bus._i] = g.v_magnitude

    Vmin = array([b.v_min for b in buses])
    Vmax = array([b.v_max for b in buses])

    return Variable("Vm", len(buses), Vm, Vmin, Vmax)
Returns the voltage magnitude variable set.
def broadcast_channel(message, channel):
    """Find the first socket for the given channel, and use it to
    broadcast to the channel, including the socket itself.

    Raises NoSocket when the channel has no connected clients.
    """
    try:
        # First client id registered on the channel -> its socket object.
        socket = CLIENTS[CHANNELS.get(channel, [])[0]][1]
    except (IndexError, KeyError):
        raise NoSocket("There are no clients on the channel: " + channel)
    socket.send_and_broadcast_channel(message, channel)
Find the first socket for the given channel, and use it to broadcast to the channel, including the socket itself.
def make_prediction_pipeline(pipeline, args): predicted_values, errors = ( pipeline | 'Read CSV Files' >> beam.io.ReadFromText(str(args.predict_data), strip_trailing_newlines=True) | 'Batch Input' >> beam.ParDo(EmitAsBatchDoFn(args.batch_size)) | 'Run TF Graph on Batches' >> beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main')) ((predicted_values, errors) | 'Format and Save' >> FormatAndSave(args))
Builds the prediction pipeline. Reads the csv files, prepends a ',' if the target column is missing, run prediction, and then prints the formated results to a file. Args: pipeline: the pipeline args: command line args
def didLastExecutedUpgradeSucceeded(self) -> bool:
    """Check whether the last executed upgrade succeeded.

    Compares the currently installed package version with the version
    recorded in the last upgrade-log event.

    :returns: upgrade execution result
    """
    lastEventInfo = self.lastActionEventInfo
    if lastEventInfo:
        ev_data = lastEventInfo.data
        currentPkgVersion = NodeControlUtil.curr_pkg_info(ev_data.pkg_name)[0]
        if currentPkgVersion:
            return currentPkgVersion.upstream == ev_data.version
        else:
            logger.warning(
                "{} failed to get information about package {} "
                "scheduled for last upgrade"
                .format(self, ev_data.pkg_name)
            )
    # No last event or no package information -> treat as failed.
    return False
Checks last record in upgrade log to find out whether it is about scheduling upgrade. If so - checks whether current version is equals to the one in that record :returns: upgrade execution result
def _pull_player_data(self):
    """Pull and aggregate all player information.

    Retrieves the player's HTML stats page, parses unique properties
    (height, weight, name, ...), and combines all seasons' stats plus
    career stats into a single dictionary keyed by season string.

    Returns
    -------
    dictionary
        The player's combined stats keyed by season, or None when the
        stats page could not be retrieved.
    """
    player_info = self._retrieve_html_page()
    if not player_info:
        return
    self._parse_player_information(player_info)
    all_stats = self._combine_all_stats(player_info)
    # Record the seasons found so they can be iterated later.
    setattr(self, '_season', list(all_stats.keys()))
    return all_stats
Pull and aggregate all player information. Pull the player's HTML stats page and parse unique properties, such as the player's height, weight, and name. Next, combine all stats for all seasons plus the player's career stats into a single object which can easily be iterated upon. Returns ------- dictionary Returns a dictionary of the player's combined stats where each key is a string of the season and the value is the season's associated stats.
def get_config(config_schema, env=None):
    """Parse config from the environment against a given schema.

    Args:
        config_schema: mapping of environment keys to envpy Schema objects
            describing the expected values.
        env: optional mapping used instead of ``os.environ``.

    Returns:
        dict of values pulled from the environment and parsed against the
        schema.
    """
    environment = os.environ if env is None else env
    return parser.parse_env(config_schema, environment)
Parse config from the environment against a given schema Args: config_schema: A dictionary mapping keys in the environment to envpy Schema objects describing the expected value. env: An optional dictionary used to override the environment rather than getting it from the os. Returns: A dictionary which maps the values pulled from the environment and parsed against the given schema. Raises: MissingConfigError: A value in the schema with no default could not be found in the environment. ParsingError: A value was found in the environment but could not be parsed into the given value type.
def parse_options(given, available):
    """Yield ``--key=value`` strings, in sorted key order, for every entry
    in ``given`` that has a truthy value and whose key is in ``available``.
    """
    for key in sorted(given):
        value = given[key]
        if value and key in available:
            yield "--{0}={1}".format(key, value)
Given a mapping of option names to values, yield ``--key=value`` strings, in sorted key order, for each entry with a truthy value whose key appears in `available`.
def export_to_hdf5(network, path, export_standard_types=False, **kwargs):
    """Export network and components to an HDF store.

    Parameters
    ----------
    path : string
        Name of hdf5 file to which to export (if it exists, it is
        overwritten).
    export_standard_types : bool
        Whether standard component types are exported as well.
    **kwargs
        Extra arguments for pd.HDFStore, e.g. compression
        (default: complevel=4).
    """
    kwargs.setdefault('complevel', 4)
    basename = os.path.basename(path)
    with ExporterHDF5(path, **kwargs) as exporter:
        _export_to_exporter(network, exporter, basename=basename, export_standard_types=export_standard_types)
Export network and components to an HDF store. Both static and series attributes of components are exported, but only if they have non-default values. If path does not already exist, it is created. Parameters ---------- path : string Name of hdf5 file to which to export (if it exists, it is overwritten) **kwargs Extra arguments for pd.HDFStore to specify f.i. compression (default: complevel=4) Examples -------- >>> export_to_hdf5(network, filename) OR >>> network.export_to_hdf5(filename)
def protected_view(view, info):
    """View deriver: allows adding ``protected=True`` to a ``view_config``.

    When the option is set, ``_advice(request)`` runs first; if it returns
    a response, that response short-circuits the wrapped view.
    """
    if info.options.get('protected'):
        def wrapper_view(context, request):
            response = _advice(request)
            if response is not None:
                return response
            else:
                return view(context, request)
        return wrapper_view
    return view
allows adding `protected=True` to a view_config`
def deps_tree(self):
    """Build the dependency mapping for this package into ``self.deps_dict``.

    Resolves each dependency (plus the package itself) via the SBo or
    binary-repository resolver depending on ``self.repo``.
    """
    dependencies = self.dependencies + [self.name]
    if self.repo == "sbo":
        for dep in dependencies:
            deps = Requires(flag="").sbo(dep)
            # NOTE(review): membership is tested against .values() (lists
            # of deps) using a package-name key, so this is always True
            # and every entry is (re)assigned -- possibly intended
            # `dep not in self.deps_dict`; confirm before changing.
            if dep not in self.deps_dict.values():
                self.deps_dict[dep] = Utils().dimensional_list(deps)
    else:
        for dep in dependencies:
            deps = Dependencies(self.repo, self.black).binary(dep, flag="")
            if dep not in self.deps_dict.values():
                self.deps_dict[dep] = Utils().dimensional_list(deps)
Build the dependency tree for the package and store it in ``deps_dict``, resolving each dependency via the SBo or binary-repository resolver depending on the repository.
def wait_for_parent_image_build(self, nvr):
    """Given image NVR, wait for the Koji build that produced it.

    Polls koji every ``self.poll_interval`` seconds until the build shows
    up or ``self.poll_timeout`` elapses.

    :return: build info dict with 'nvr' and 'id' keys
    :raises KojiParentBuildMissing: if the build never appears within the
        timeout, or appears in a non-COMPLETE state.
    """
    self.log.info('Waiting for Koji build for parent image %s', nvr)
    poll_start = time.time()
    while time.time() - poll_start < self.poll_timeout:
        build = self.koji_session.getBuild(nvr)
        if build:
            self.log.info('Parent image Koji build found with id %s', build.get('id'))
            if build['state'] != koji.BUILD_STATES['COMPLETE']:
                exc_msg = ('Parent image Koji build for {} with id {} state is not COMPLETE.')
                raise KojiParentBuildMissing(exc_msg.format(nvr, build.get('id')))
            return build
        time.sleep(self.poll_interval)
    raise KojiParentBuildMissing('Parent image Koji build NOT found for {}!'.format(nvr))
Given image NVR, wait for the build that produced it to show up in koji. If it doesn't within the timeout, raise an error. :return build info dict with 'nvr' and 'id' keys
def save(self, fname):
    """Save the figure to the file ``fname`` as a standalone XML document."""
    payload = etree.tostring(self.root, xml_declaration=True,
                             standalone=True, pretty_print=True)
    with open(fname, 'wb') as handle:
        handle.write(payload)
Save figure to a file
def polymer_to_reference_axis_distances(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
    """Returns distances between the primitive of a Polymer and a
    reference_axis.

    Distances are calculated between each point of the Polymer primitive
    and the corresponding point in reference_axis, run through in the
    same order, so take care with the relative orientation of the
    reference axis when defining it.

    Parameters
    ----------
    p : ampal.Polymer
    reference_axis : list(numpy.array or tuple or list)
        Must equal the length of the Polymer; each element is a point
        in R^3.
    tag : bool, optional
        If True, tags the Chain with the reference axis coordinates and
        each Residue with its distance to the ref axis (stored against
        the CA atom position).
    reference_axis_name : str, optional
        Used to name the keys in tags at Chain and Residue level.

    Returns
    -------
    distances : list(float)
        Distances between corresponding points on the reference axis and
        the Polymer primitive.

    Raises
    ------
    ValueError
        If the Polymer and the reference_axis have unequal length.
    """
    if not len(p) == len(reference_axis):
        raise ValueError(
            "The reference axis must contain the same number of points "
            "as the Polymer primitive.")
    prim_cas = p.primitive.coordinates
    ref_points = reference_axis.coordinates
    distances = [distance(prim_cas[i], ref_points[i]) for i in range(len(prim_cas))]
    if tag:
        p.tags[reference_axis_name] = reference_axis
        monomer_tag_name = 'distance_to_{0}'.format(reference_axis_name)
        for m, d in zip(p._monomers, distances):
            m.tags[monomer_tag_name] = d
    return distances
Returns distances between the primitive of a Polymer and a reference_axis. Notes ----- Distances are calculated between each point of the Polymer primitive and the corresponding point in reference_axis. In the special case of the helical barrel, if the Polymer is a helix and the reference_axis represents the centre of the barrel, then this function returns the radius of the barrel at each point on the helix primitive. The points of the primitive and the reference_axis are run through in the same order, so take care with the relative orientation of the reference axis when defining it. Parameters ---------- p : ampal.Polymer reference_axis : list(numpy.array or tuple or list) Length of reference_axis must equal length of the Polymer. Each element of reference_axis represents a point in R^3. tag : bool, optional If True, tags the Chain with the reference axis coordinates and each Residue with its distance to the ref axis. Distances are stored at the Residue level, but refer to distances from the CA atom. reference_axis_name : str, optional Used to name the keys in tags at Chain and Residue level. Returns ------- distances : list(float) Distance values between corresponding points on the reference axis and the `Polymer` `Primitive`. Raises ------ ValueError If the Polymer and the reference_axis have unequal length.
def put_info(self, key, value):
    """Put associated information of the task.

    Stores ``key``/``value`` under this task's name via the owning
    instance.
    """
    return self.instance.put_task_info(self.name, key, value)
Store a key/value pair of information associated with this task.
def locations_to_cache(locations, latest=False):
    """Return a cumulative cache file built from the list of locations.

    Parameters
    ----------
    locations : list
        Strings containing files, globs, or cache files used to build a
        combined lal cache file object.
    latest : Optional, {False, Boolean}
        Only return a cache with the most recent frame in the locations.
        If false, all results are returned.

    Returns
    -------
    cache : lal.Cache
        A cumulative lal cache object containing the files derived from
        the list of locations.
    """
    cum_cache = lal.Cache()
    for source in locations:
        flist = glob.glob(source)
        if latest:
            def relaxed_getctime(fn):
                # Files may vanish between glob and stat; treat missing
                # files as oldest so they are never chosen.
                try:
                    return os.path.getctime(fn)
                except OSError:
                    return 0
            flist = [max(flist, key=relaxed_getctime)]
        for file_path in flist:
            dir_name, file_name = os.path.split(file_path)
            _, file_extension = os.path.splitext(file_name)
            if file_extension in [".lcf", ".cache"]:
                cache = lal.CacheImport(file_path)
            elif file_extension == ".gwf" or _is_gwf(file_path):
                cache = lalframe.FrOpen(str(dir_name), str(file_name)).cache
            else:
                raise TypeError("Invalid location name")
            cum_cache = lal.CacheMerge(cum_cache, cache)
    return cum_cache
Return a cumulative cache file build from the list of locations Parameters ---------- locations : list A list of strings containing files, globs, or cache files used to build a combined lal cache file object. latest : Optional, {False, Boolean} Only return a cache with the most recent frame in the locations. If false, all results are returned. Returns ------- cache : lal.Cache A cumulative lal cache object containing the files derived from the list of locations
def component(self, extra_params=None):
    """Return the Component currently assigned to the Ticket, or None.

    Looks up this ticket's ``component_id`` in the parent space's
    components.
    """
    component_id = self.get('component_id', None)
    if not component_id:
        return None
    matches = self.space.components(id=component_id, extra_params=extra_params)
    if matches:
        return matches[0]
    return None
The Component currently assigned to the Ticket
def tag_stuff(self):
    """Yield items from the input stream, populating each item's 'tags'
    set with the result of every tag method; None results are dropped."""
    for item in self.input_stream:
        tags = item.setdefault('tags', set())
        for tagger in self.tag_methods:
            tags.add(tagger(item))
        # Tag methods that matched nothing return None; drop it.
        tags.discard(None)
        yield item
Look through my input stream for the fields to be tagged
def _ensure_value_is_valid(self, value):
    """Ensure that ``value`` is a valid collection value.

    :raises TypeError: if ``value`` is not an instance of the class-level
        ``value_type``.
    :returns: the validated value unchanged.
    """
    if not isinstance(value, self.__class__.value_type):
        raise TypeError('{0} is not valid collection value, instance '
                        'of {1} required'.format(
                            value, self.__class__.value_type))
    return value
Ensure that value is a valid collection's value.
def _get_request_args(method, **kwargs):
    """Build the flickr API POST argument list for ``method``.

    Requests json as the return type and appends the computed api_sig.

    :param method: The method provided by flickr,
        e.g. ``flickr.photosets.getPhotos``.
    :type method: str
    :param kwargs: Other settings merged into the argument list.
    :type kwargs: dict
    :return: An argument list of ``(name, value)`` tuples for the request.
    :rtype: list of tuples
    """
    args = [
        ('api_key', api_key),
        ('format', 'json'),
        ('method', method),
        ('nojsoncallback', '1'),
    ]
    # dict.items() works on Python 2 and 3; the previous dict.iteritems()
    # raises AttributeError on Python 3.
    for key, value in kwargs.items():
        args.append((key, value))
    # The signature must be computed over arguments sorted by name.
    args.sort(key=lambda tup: tup[0])
    api_sig = _get_api_sig(args)
    args.append(api_sig)
    return args
Use `method` and other settings to produce the flickr API arguments. JSON is requested as the return type, and the computed api_sig is appended. :param method: The method provided by flickr, ex: flickr.photosets.getPhotos :type method: str :param kwargs: Other settings :type kwargs: dict :return: An argument list used for the post request :rtype: list of tuples
def add_xml_declaration(fn):
    """Decorator that prepends an XML version declaration header to the
    output of FN."""
    declaration = '<?xml version="1.0" encoding="UTF-8"?>\n\n'

    @wraps(fn)
    def add_xml_declaration_decorator(*args, **kwargs):
        return declaration + fn(*args, **kwargs)

    return add_xml_declaration_decorator
Decorator to add header with XML version declaration to output from FN.
def _parse_config(self):
    """Parses the mlag global configuration.

    Returns:
        dict: A dict with a single 'config' key holding the parsed mlag
        settings, intended to be merged into the resource dict.
    """
    config = self.get_block('mlag configuration')
    cfg = dict()
    cfg.update(self._parse_domain_id(config))
    cfg.update(self._parse_local_interface(config))
    cfg.update(self._parse_peer_address(config))
    cfg.update(self._parse_peer_link(config))
    cfg.update(self._parse_shutdown(config))
    return dict(config=cfg)
Parses the mlag global configuration Returns: dict: A dict object that is intended to be merged into the resource dict
def from_pydatetime(cls, pydatetime):
    """Creates sql datetime2 object from Python datetime object,
    ignoring timezone.

    @param pydatetime: Python datetime object
    @return: sql datetime2 object
    """
    # NOTE(review): this passes the bound methods ``pydatetime.date`` /
    # ``pydatetime.time`` rather than calling them — verify the converters
    # expect callables and not date/time values.
    return cls(date=Date.from_pydate(pydatetime.date), time=Time.from_pytime(pydatetime.time))
Creates sql datetime2 object from Python datetime object ignoring timezone @param pydatetime: Python datetime object @return: sql datetime2 object
def init_logs(args, tool="NanoPlot"):
    """Initiate log file and log arguments.

    Creates a timestamped log file in ``args.outdir`` and, when
    ``args.verbose`` is set, additionally echoes log records to the
    console.

    :returns: the path of the created log file.
    """
    start_time = dt.fromtimestamp(time()).strftime('%Y%m%d_%H%M')
    logname = os.path.join(args.outdir, args.prefix + tool + "_" + start_time + ".log")
    handlers = [logging.FileHandler(logname)]
    if args.verbose:
        handlers.append(logging.StreamHandler())
    logging.basicConfig(
        format='%(asctime)s %(message)s',
        handlers=handlers,
        level=logging.INFO)
    logging.info('{} {} started with arguments {}'.format(tool, __version__, args))
    logging.info('Python version is: {}'.format(sys.version.replace('\n', ' ')))
    return logname
Initiate log file and log arguments.
def process_geneways_files(input_folder=data_folder, get_evidence=True):
    """Reads in Geneways data and returns a processor with extracted
    statements.

    Parameters
    ----------
    input_folder : Optional[str]
        A folder in which to search for the Geneways extraction data files
        (human_action.txt, human_actionmention.txt, human_symbols.txt).
        Defaults to the package data folder.
    get_evidence : Optional[bool]
        Attempt to find the evidence text for each extraction.
        Default: True

    Returns
    -------
    gp : GenewaysProcessor
        A GenewaysProcessor object containing INDRA statements generated
        from the Geneways action mentions.
    """
    gp = GenewaysProcessor(input_folder, get_evidence)
    return gp
Reads in Geneways data and returns a list of statements. Parameters ---------- input_folder : Optional[str] A folder in which to search for Geneways data. Looks for these Geneways extraction data files: human_action.txt, human_actionmention.txt, human_symbols.txt. Omit this parameter to use the default input folder which is indra/data. get_evidence : Optional[bool] Attempt to find the evidence text for an extraction by downloading the corresponding text content and searching for the given offset in the text to get the evidence sentence. Default: True Returns ------- gp : GenewaysProcessor A GenewaysProcessor object which contains a list of INDRA statements generated from the Geneways action mentions.
def subset_sum(x, R):
    """Subset-sum by meet-in-the-middle splitting.

    :param x: table of values
    :param R: target value
    :returns bool: True iff some subsequence of x has total sum R
    :complexity: O(2^(n/2)) up to logarithmic factors
    """
    half = len(x) // 2
    # All achievable sums of the left half; a right-half sum z matches
    # iff R - z is achievable on the left.
    left_sums = set(part_sum(x[:half]))
    return any(R - right in left_sums for right in part_sum(x[half:]))
Subsetsum by splitting :param x: table of values :param R: target value :returns bool: if there is a subsequence of x with total sum R :complexity: :math:`O(n^{\\lceil n/2 \\rceil})`
def save(self, file_path):
    """Save the dataset to disk via pickle.

    Parameters
    ----------
    file_path : str
        File path to save the current dataset to.

    Raises
    ------
    IOError
        If saving to disk is not successful.
    """
    try:
        file_path = os.path.abspath(file_path)
        with open(file_path, 'wb') as df:
            pickle.dump((self.__data, self.__classes, self.__labels,
                         self.__dtype, self.__description,
                         self.__num_features, self.__feature_names), df)
    except IOError as ioe:
        # Bug fix: the original called IOError('...{}', format(ioe)) —
        # a two-argument exception with a stray format() builtin — instead
        # of interpolating via str.format().
        raise IOError('Unable to save the dataset to file: {}'.format(ioe))
    # The former trailing `except: raise` was a no-op re-raise and has
    # been removed; other exceptions still propagate unchanged.
Method to save the dataset to disk. Parameters ---------- file_path : str File path to save the current dataset to Raises ------ IOError If saving to disk is not successful.
def to_disk(self, path, exclude=tuple(), disable=None):
    """Save the current state to a directory.  If a model is loaded, this
    will include the model.

    path (unicode or Path): Path to a directory, which will be created if
        it doesn't exist.
    exclude (list): Names of components or serialization fields to
        exclude.

    DOCS: https://spacy.io/api/language#to_disk
    """
    # `disable` is deprecated in favour of `exclude`.
    if disable is not None:
        deprecation_warning(Warnings.W014)
        exclude = disable
    path = util.ensure_path(path)
    serializers = OrderedDict()
    serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(p, exclude=["vocab"])
    serializers["meta.json"] = lambda p: p.open("w").write(srsly.json_dumps(self.meta))
    for name, proc in self.pipeline:
        if not hasattr(proc, "name"):
            continue
        if name in exclude:
            continue
        if not hasattr(proc, "to_disk"):
            continue
        # Bind `proc` as a default argument to avoid the late-binding
        # closure pitfall inside this loop.
        serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
    serializers["vocab"] = lambda p: self.vocab.to_disk(p)
    util.to_disk(path, serializers, exclude)
Save the current state to a directory. If a model is loaded, this will include the model. path (unicode or Path): Path to a directory, which will be created if it doesn't exist. exclude (list): Names of components or serialization fields to exclude. DOCS: https://spacy.io/api/language#to_disk
def start(self):
    """Start this context: the phase where everything needed for the whole
    lifetime of a transformation is set up.

    The ContextCurrifier builds a decorating stack (services and context
    processors) and will later call the wrapped node callable with
    additional parameters.
    """
    super().start()
    try:
        initial = self._get_initial_context()
        self._stack = ContextCurrifier(self.wrapped, *initial.args, **initial.kwargs)
        if isconfigurabletype(self.wrapped):
            # A still-unconfigured configurable here is an error either
            # way: both branches below raise TypeError (the try/else only
            # controls whether the instantiation error is chained).
            try:
                self.wrapped = self.wrapped(_final=True)
            except Exception as exc:
                raise TypeError(
                    "Configurables should be instanciated before execution starts.\nGot {!r}.\n".format(
                        self.wrapped
                    )
                ) from exc
            else:
                raise TypeError(
                    "Configurables should be instanciated before execution starts.\nGot {!r}.\n".format(
                        self.wrapped
                    )
                )
        self._stack.setup(self)
    except Exception:
        self.fatal(sys.exc_info(), level=0)
        raise
Starts this context, a.k.a the phase where you setup everything which will be necessary during the whole lifetime of a transformation. The "ContextCurrifier" is in charge of setting up a decorating stack, that includes both services and context processors, and will call the actual node callable with additional parameters.
def _format_command_usage(commands):
    """Construct the Commands-part of the usage text.

    Parameters
    ----------
    commands : dict[str, func]
        Dictionary of supported commands, mapping command name to the
        object whose docstring describes it.

    Returns
    -------
    str
        Text formatted as a description of the commands; '' when no
        commands are given.
    """
    if not commands:
        return ""
    # Column width: longest command name, with a floor of 8.
    cmd_len = max(8, max(len(name) for name in commands))
    parts = ["\nCommands:\n"]
    for cmd_name, cmd_doc in commands.items():
        summary = _get_first_line_of_docstring(cmd_doc)
        parts.append(("  {:%d} {}\n" % cmd_len).format(cmd_name, summary))
    return "".join(parts)
Construct the Commands-part of the usage text. Parameters ---------- commands : dict[str, func] dictionary of supported commands. Each entry should be a tuple of (name, function). Returns ------- str Text formatted as a description of the commands.
def get_job_statuses(github_token, api_url, build_id, polling_interval, job_number):
    """Wait for all the travis jobs to complete.

    Once the other jobs are complete (or one has failed), return a list of
    booleans indicating whether each job passed. Jobs marked
    "allow_failure" and the current job (``job_number``) are ignored.
    """
    auth = get_json('{api_url}/auth/github'.format(api_url=api_url), data={'github_token': github_token})['access_token']
    while True:
        build = get_json('{api_url}/builds/{build_id}'.format(
            api_url=api_url, build_id=build_id), auth=auth)
        jobs = [job for job in build['jobs'] if job['number'] != job_number and not job['allow_failure']]
        if all(job['finished_at'] for job in jobs):
            break
        elif any(job['state'] != 'passed' for job in jobs if job['finished_at']):
            # A job already failed; stop waiting for the rest.
            break
        print('Waiting for jobs to complete: {job_numbers}'.format(
            job_numbers=[job['number'] for job in jobs if not job['finished_at']]))
        time.sleep(polling_interval)
    return [job['state'] == 'passed' for job in jobs]
Wait for all the travis jobs to complete. Once the other jobs are complete, return a list of booleans, indicating whether or not the job was successful. Ignore jobs marked "allow_failure".
def offset(self, index=0):
    """Offset the camera pointing to be centred on a particular CCD.

    Uses the per-CCD ra/dec offsets from the camera geometry table and
    stores the result in ``self._coordinate``.
    """
    eta = self._geometry[self.camera][index]["ra"]
    xi = self._geometry[self.camera][index]["dec"]
    # Scale the RA offset by 1/cos(dec) to account for the convergence of
    # meridians at this declination.
    ra = self.origin.ra - (eta/math.cos(self.dec.radian))*units.degree
    dec = self.origin.dec - xi * units.degree + 45 * units.arcsec
    self._coordinate = SkyCoord(ra, dec)
Offset the camera pointing to be centred on a particular CCD.
def _init_taxids(taxid, taxids): ret = set() if taxids is not None: if taxids is True: return True if isinstance(taxids, int): ret.add(taxids) else: ret.update(taxids) if taxid is not None: ret.add(taxid) if not ret: ret.add(9606) print('**NOTE: DEFAULT TAXID STORED FROM gene2go IS 9606 (human)\n') return ret
Return taxid set
def tostring(self):
    """Return a pretty-printed string output for the rpc reply."""
    # Re-parse with blank text stripped so pretty_print re-indents cleanly.
    parser = etree.XMLParser(remove_blank_text=True)
    outputtree = etree.XML(etree.tostring(self.__doc), parser)
    return etree.tostring(outputtree, pretty_print=True)
return a pretty-printed string output for rpc reply
def check_error_response(self, body, status):
    """Raise an exception if the response from the backend was an error.

    Args:
      body: A string containing the backend response body.
      status: A string containing the backend response status, e.g.
        '500 Internal Server Error'.

    Raises:
      BackendError if the response status code is 300 or greater.
    """
    code = int(status.partition(' ')[0])
    if code >= 300:
        raise errors.BackendError(body, status)
Raise an exception if the response from the backend was an error. Args: body: A string containing the backend response body. status: A string containing the backend response status. Raises: BackendError if the response is an error.
def target(self, project_module):
    """Returns the project target corresponding to the 'project-module',
    creating and caching it on first access.
    """
    assert isinstance(project_module, basestring)
    if project_module not in self.module2target:
        self.module2target[project_module] = \
            b2.build.targets.ProjectTarget(project_module, project_module,
                                           self.attribute(project_module, "requirements"))
    return self.module2target[project_module]
Returns the project target corresponding to the 'project-module'.
def _parse_banners(self): motd_value = login_value = None matches = re.findall('^banner\s+(login|motd)\s?$\n(.*?)$\nEOF$\n', self.config, re.DOTALL | re.M) for match in matches: if match[0].strip() == "motd": motd_value = match[1] elif match[0].strip() == "login": login_value = match[1] return dict(banner_motd=motd_value, banner_login=login_value)
Parses the global config and returns the value for both motd and login banners. Returns: dict: The configure value for modtd and login banners. If the banner is not set it will return a value of None for that key. The returned dict object is intendd to be merged into the resource dict
def as_symbols(self):
    """Set of symbols required by this Require.

    :return: set of ``(symbol type, symbol name)`` tuples
    """
    out = set()
    for kind, names in (('type', self.types),
                        ('enum', self.enums),
                        ('command', self.commands)):
        out.update((kind, name) for name in names)
    return out
Set of symbols required by this Require :return: set of ``(symbol type, symbol name)`` tuples