code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def items(self):
    """Return (key, value) pairs for every queued key, newest first."""
    # Snapshot the queue, then pair each key with its mapped value in
    # reverse insertion order.
    snapshot = list(self._queue)
    return [(key, self._mapping[key]) for key in reversed(snapshot)]
Return a list of items.
def _domain_event_metadata_change_cb(conn, domain, mtype, nsuri, opaque):
    """Libvirt domain metadata-change event handler.

    Forwards the event via ``_salt_send_domain_event``, translating the
    numeric metadata type into its ``VIR_DOMAIN_METADATA_*`` enum name.

    :param conn: libvirt connection the event originated from
    :param domain: domain whose metadata changed
    :param mtype: numeric metadata type value
    :param nsuri: XML namespace URI of the changed metadata
    :param opaque: dict carrying at least the ``'event'`` tag to publish under
    """
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'type': _get_libvirt_enum_string('VIR_DOMAIN_METADATA_', mtype),
        'nsuri': nsuri
    })
Domain metadata change events handler
def _create_cache_filename(self, source_file): res = self._create_cache_key(source_file) + ".cache" return os.path.join(self.__dir, res)
return the cache file name for a header file. :param source_file: Header file name :type source_file: str :rtype: str
def get_list_subtask_positions_objs(client, list_id):
    """Fetch the subtask-position objects for all tasks within a list.

    Convenience wrapper around the SUBTASK_POSITIONS endpoint so callers
    do not have to enumerate the list's tasks first.

    Returns:
        Decoded JSON payload: the subtask ordering objects for the list.
    """
    response = client.authenticated_request(
        client.api.Endpoints.SUBTASK_POSITIONS,
        params={'list_id': int(list_id)},
    )
    return response.json()
Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful. Returns: List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list
def build(cls: Type[T], data: Mapping[str, Any]) -> T:
    """Build an instance of *cls* from a mapping, recursively.

    Keys matching declared fields are kept; mapping-valued fields are
    built via the field type's own ``build`` when it subclasses ``Auto``,
    otherwise a type is generated on the fly with ``Auto.generate``.
    Unknown keys are logged and dropped.
    """
    fields = fields_dict(cls)
    kwargs: Dict[str, Any] = {}
    for key, value in data.items():
        if key in fields:
            if isinstance(value, Mapping):
                t = fields[key].type
                if issubclass(t, Auto):
                    # Field type knows how to build itself recursively.
                    value = t.build(value)
                else:
                    # Synthesize a type named after the key for the nested dict.
                    value = Auto.generate(value, name=key.title())
            kwargs[key] = value
        else:
            log.debug(f"got unknown attribute {key} for {cls.__name__}")
    return cls(**kwargs)
Build objects from dictionaries, recursively.
def castable(source, target):
    """Report whether *source*'s type is implicitly castable to *target*'s.

    Literal values (when present on the source op) are passed along,
    since castability can depend on the concrete value.
    """
    literal_value = getattr(source.op(), 'value', None)
    return dt.castable(source.type(), target.type(), value=literal_value)
Return whether source ir type is implicitly castable to target Based on the underlying datatypes and the value in case of Literals
def standard_program_header(self, title, length, line=32768):
    """Write a standard tape header block of PROGRAM (BASIC) type.

    :param title: program name stored in the header
    :param length: program length in bytes (also stored as param2)
    :param line: auto-start LINE number (32768 means no auto-run)
    """
    self.save_header(
        self.HEADER_TYPE_BASIC,
        title,
        length,
        param1=line,
        param2=length,
    )
Generates a standard header block of PROGRAM type
def stopService(self):
    """Stop calling persistent timed events.

    Delegates to the parent implementation first, then cancels and clears
    any outstanding timer so no callback fires after shutdown.
    """
    super(_SiteScheduler, self).stopService()
    if self.timer is not None:
        self.timer.cancel()
        self.timer = None
Stop calling persistent timed events.
def get_addresses_on_both_chains(wallet_obj, used=None, zero_balance=None):
    """Get addresses across both subchains (indices 0 and 1).

    :param wallet_obj: BIP32 wallet object; its private key (when present)
        is used for address-path verification, otherwise the public key
    :param used: optional used/unused filter, passed through
    :param zero_balance: optional zero-balance filter, passed through
    :return: list of dicts, one per non-empty chain:
        {'index': ..., 'chain_addresses': [...]}; address entries carry
        address/path/pubkeyhex (plus WIF/privkeyhex with a private key)
    """
    mpub = wallet_obj.serialize_b58(private=False)
    wallet_name = get_blockcypher_walletname_from_mpub(
        mpub=mpub,
        subchain_indices=[0, 1],
    )
    wallet_addresses = get_wallet_addresses(
        wallet_name=wallet_name,
        api_key=BLOCKCYPHER_API_KEY,
        is_hd_wallet=True,
        used=used,
        zero_balance=zero_balance,
        coin_symbol=coin_symbol_from_mkey(mpub),
    )
    verbose_print('wallet_addresses:')
    verbose_print(wallet_addresses)
    # Verify derivation paths against the private key when available,
    # otherwise against the public master key.
    if wallet_obj.private_key:
        master_key = wallet_obj.serialize_b58(private=True)
    else:
        master_key = mpub
    chains_address_paths_cleaned = []
    for chain in wallet_addresses['chains']:
        if chain['chain_addresses']:
            chain_address_paths = verify_and_fill_address_paths_from_bip32key(
                address_paths=chain['chain_addresses'],
                master_key=master_key,
                network=guess_network_from_mkey(mpub),
            )
            chain_address_paths_cleaned = {
                'index': chain['index'],
                'chain_addresses': chain_address_paths,
            }
            chains_address_paths_cleaned.append(chain_address_paths_cleaned)
    return chains_address_paths_cleaned
Get addresses across both subchains based on the filter criteria passed in Returns a list of dicts of the following form: [ {'address': '1abc123...', 'path': 'm/0/9', 'pubkeyhex': '0123456...'}, ..., ] Dicts may also contain WIF and privkeyhex if wallet_obj has private key
def folder_size(pth, ignore=None):
    """Return the total size in bytes of all files under *pth*.

    :param pth: directory to measure; ``exc.FolderNotFound`` is raised if
        it is not a directory
    :param ignore: a single glob-style pattern or a list of patterns; any
        file matching one is excluded from the total
    """
    if not os.path.isdir(pth):
        raise exc.FolderNotFound
    ignore = coerce_to_list(ignore)
    total = 0
    for root, _, names in os.walk(pth):
        # NOTE: the previous implementation reused ``pth`` as its loop
        # variable, silently clobbering the function argument; use a
        # distinct name and filter instead of removing while iterating.
        paths = [os.path.realpath(os.path.join(root, nm)) for nm in names]
        kept = [
            p for p in paths
            if os.path.exists(p) and not match_pattern(p, ignore)
        ]
        total += sum(os.stat(p).st_size for p in kept)
    return total
Returns the total bytes for the specified path, optionally ignoring any files which match the 'ignore' parameter. 'ignore' can either be a single string pattern, or a list of such patterns.
def save(self, obj, run_id):
    """Persist a workflow instance under an identifier derived from *run_id*.

    :param obj: the workflow instance to save
    :param run_id: unique id for this run, used to build the save identifier
    """
    identifier = self.generate_save_identifier(obj, run_id)
    self.store.save(obj, identifier)
Save a workflow obj - instance of a workflow to save run_id - unique id to give the run
def _parse_mode(self, mode, allowed=None, single=False): r if type(mode) is str: mode = [mode] for item in mode: if (allowed is not None) and (item not in allowed): raise Exception('\'mode\' must be one of the following: ' + allowed.__str__()) [mode.remove(L) for L in mode if mode.count(L) > 1] if single: if len(mode) > 1: raise Exception('Multiple modes received when only one mode ' + 'allowed') else: mode = mode[0] return mode
r""" This private method is for checking the \'mode\' used in the calling method. Parameters ---------- mode : string or list of strings The mode(s) to be parsed allowed : list of strings A list containing the allowed modes. This list is defined by the calling method. If any of the received modes are not in the allowed list an exception is raised. single : boolean (default is False) Indicates if only a single mode is allowed. If this argument is True then a string is returned rather than a list of strings, which makes it easier to work with in the caller method. Returns ------- A list containing the received modes as strings, checked to ensure they are all within the allowed set (if provided). Also, if the ``single`` argument was True, then a string is returned.
def merge_rects(rect1, rect2):
    """Return the smallest pygame.Rect containing both input rects."""
    a = pygame.Rect(rect1)
    b = pygame.Rect(rect2)
    left = min(a.x, b.x)
    top = min(a.y, b.y)
    width = max(a.right, b.right) - left
    height = max(a.bottom, b.bottom) - top
    return pygame.Rect(left, top, width, height)
Return the smallest rect containing both of the two given rects
def get_containers(self, include_only=None):
    """Download all the containers owned by a group.

    :param include_only: optional list of ``Group`` or ``Location``
        objects; when given, only containers in those locations/groups
        are fetched. (Previously defaulted to a mutable ``[]``; ``None``
        behaves identically here and avoids the shared-default pitfall.)
    :raises ValueError: if no locations exist, or none match include_only
    """
    locations = self.get_locations()
    if len(locations) == 0:
        raise ValueError("No locations for containers exist in Cheminventory")
    final_locations = []
    if include_only:
        for location in locations:
            check = location in include_only or location.group in include_only
            if check:
                final_locations.append(location)
        if len(final_locations) == 0:
            raise ValueError(f"Location(s) or group(s) {include_only} is/are not in the database.")
    else:
        final_locations = locations
    containers = []
    for location in final_locations:
        containers += self._get_location_containers(location.inventory_id)
    return containers
Download all the containers owned by a group Arguments --------- include_only: List containing `Group` or `Location` objects Search only over a list of groups or locations
def _expand_logical_shortcuts(cls, schema):
    """Expand agglutinated of-rules (e.g. ``anyof_regex``) in a schema.

    :param schema: The schema-definition to expand.
    :return: The expanded schema-definition.
    """
    def is_of_rule(x):
        return isinstance(x, _str_type) and \
            x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
    for field in schema:
        # Materialize matching rules first: the body mutates schema[field],
        # and mutating a dict while a generator iterates it raises
        # RuntimeError.
        for of_rule in [x for x in schema[field] if is_of_rule(x)]:
            # Split only on the first underscore so rule names that
            # themselves contain underscores (e.g. 'anyof_min_length')
            # survive intact.
            operator, rule = of_rule.split('_', 1)
            schema[field].update({operator: []})
            for value in schema[field][of_rule]:
                schema[field][operator].append({rule: value})
            del schema[field][of_rule]
    return schema
Expand agglutinated rules in a definition-schema. :param schema: The schema-definition to expand. :return: The expanded schema-definition.
def new_config_event(self):
    """Called by the event loop when new config is available.

    Runs the ``on_set_config`` hook; any exception is logged and then
    ``StopIteration`` is raised so the surrounding event loop terminates.
    """
    try:
        self.on_set_config()
    except Exception as ex:
        self.logger.exception(ex)
        # Signal the event loop to stop iterating on a bad config.
        raise StopIteration()
Called by the event loop when new config is available.
def get_occupied_slots(instance):
    """Return the slots of *instance* that currently hold a value.

    A declared slot with no value assigned raises AttributeError on
    access, so only slots for which ``hasattr`` succeeds are included.
    """
    occupied = []
    for slot in get_all_slots(type(instance)):
        if hasattr(instance, slot):
            occupied.append(slot)
    return occupied
Return a list of slots for which values have been set. (While a slot might be defined, if a value for that slot hasn't been set, then it's an AttributeError to request the slot's value.)
def group_add(self, name, restrict, repos, lces=None, assets=None,
              queries=None, policies=None, dashboards=None,
              credentials=None, description=''):
    """Create a new group via the ``group::add`` API call.

    :param name: group name
    :param restrict: asset ids the group is restricted to (definingAssets)
    :param repos: repository ids available to the group
    :param lces: optional LCE ids to attach
    :param assets: optional asset ids to attach
    :param queries: optional query ids to attach
    :param policies: optional policy ids to attach
    :param dashboards: optional dashboard-tab ids to attach
    :param credentials: optional credential ids to attach
    :param description: optional free-text description
    :return: the raw API response

    The list parameters previously used mutable ``[]`` defaults; ``None``
    is equivalent here (``x or []`` iterates the same) and avoids the
    shared-default pitfall.
    """
    return self.raw_query('group', 'add', data={
        'lces': [{'id': i} for i in (lces or [])],
        'assets': [{'id': i} for i in (assets or [])],
        'queries': [{'id': i} for i in (queries or [])],
        'policies': [{'id': i} for i in (policies or [])],
        'dashboardTabs': [{'id': i} for i in (dashboards or [])],
        'credentials': [{'id': i} for i in (credentials or [])],
        'repositories': [{'id': i} for i in repos],
        'definingAssets': [{'id': i} for i in restrict],
        'name': name,
        'description': description,
        'users': [],
        'context': ''
    })
Add a new group with the given name, defining assets (restrict) and repositories; optionally attach LCEs, assets, queries, policies, dashboards and credentials by id, plus a description.
def inlink_file(self, filepath):
    """Create a symbolic link to *filepath* in the task's input directory.

    The link name is ``in_`` plus the abinit extension of *filepath*.
    If a link already exists it must point at *filepath*, otherwise
    ``self.Error`` is raised.
    """
    if not os.path.exists(filepath):
        # Linking proceeds anyway: the target may be produced later.
        logger.debug("Creating symbolic link to not existent file %s" % filepath)
    root, abiext = abi_splitext(filepath)
    infile = "in_" + abiext
    infile = self.indir.path_in(infile)
    self.history.info("Linking path %s --> %s" % (filepath, infile))
    if not os.path.exists(infile):
        os.symlink(filepath, infile)
    else:
        if os.path.realpath(infile) != filepath:
            raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
Create a symbolic link to the specified file in the directory containing the input files of the task.
def filter(cls, filters, iterable):
    """Yield the elements of *iterable* that pass the given filter(s).

    ``filters`` may be a single Filter or a list of Filters; each
    filter's generator consumes the previous one's output.
    """
    if isinstance(filters, Filter):
        filters = [filters]
    for flt in filters:  # renamed to avoid shadowing the builtin in the loop
        iterable = flt.generator(iterable)
    return iterable
Returns the elements in `iterable` that pass given `filters`
def moveaxis(tensor, source, destination):
    """Move the *source* axes of *tensor* to *destination* positions,
    leaving the remaining axes in their original order.

    Parameters
    ----------
    tensor : mx.nd.array
        The array whose axes should be reordered.
    source : int or sequence of int
        Original positions of the axes to move; may be negative, must be
        unique.
    destination : int or sequence of int
        Destination position for each original axis; may be negative,
        must be unique.

    Returns
    -------
    mx.nd.array
        Array with moved axes.
    """
    try:
        source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim)
    except IndexError:
        # The original message concatenated two literals without a space
        # and applied %d to a possibly-tuple argument; build one clean
        # string with %s instead.
        raise ValueError(
            'Source should verify 0 <= source < tensor.ndim. '
            'Got %s' % (source,))
    try:
        destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim)
    except IndexError:
        # Likewise: the original passed a tuple of two strings to
        # ValueError instead of one formatted message.
        raise ValueError(
            'Destination should verify 0 <= destination < tensor.ndim (%d). '
            'Got %s' % (tensor.ndim, destination))
    if len(source) != len(destination):
        raise ValueError('`source` and `destination` arguments must have '
                         'the same number of elements')
    # Keep unmoved axes in order, then splice each moved axis in place.
    order = [n for n in range(tensor.ndim) if n not in source]
    for dest, src in sorted(zip(destination, source)):
        order.insert(dest, src)
    return op.transpose(tensor, order)
Moves the `source` axis into the `destination` position while leaving the other axes in their original order Parameters ---------- tensor : mx.nd.array The array which axes should be reordered source : int or sequence of int Original position of the axes to move. Can be negative but must be unique. destination : int or sequence of int Destination position for each of the original axes. Can be negative but must be unique. Returns ------- result : mx.nd.array Array with moved axes. Examples -------- >>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]]) >>> mx.nd.moveaxis(X, 0, 1).shape (3L, 2L) >>> X = mx.nd.zeros((3, 4, 5)) >>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape (5, 4, 3)
def primary_transcript(entrystream, parenttype='gene', logstream=stderr):
    """Select a single transcript as a representative for each gene.

    Non-Feature entries pass through untouched. For each *parenttype*
    feature with children, transcripts are grouped by type; a primary
    type is chosen via ``_get_primary_type`` and its transcript list is
    collapsed to one representative (``_emplace_pmrna`` for mRNA,
    ``_emplace_transcript`` otherwise). Gene features are modified in
    place before being yielded.
    """
    for entry in entrystream:
        if not isinstance(entry, tag.Feature):
            yield entry
            continue
        for parent in tag.select.features(entry, parenttype, traverse=True):
            if parent.num_children == 0:
                continue
            # Group transcript-type children by their feature type.
            transcripts = defaultdict(list)
            for child in parent.children:
                if child.type in type_terms:
                    transcripts[child.type].append(child)
            if len(transcripts) == 0:
                continue
            ttypes = list(transcripts.keys())
            ttype = _get_primary_type(ttypes, parent)
            transcript_list = transcripts[ttype]
            if ttype == 'mRNA':
                _emplace_pmrna(transcript_list, parent, strict=True)
            else:
                _emplace_transcript(transcript_list, parent)
        yield entry
Select a single transcript as a representative for each gene. This function is a generalization of the `primary_mrna` function that attempts, under certain conditions, to select a single transcript as a representative for each gene. If a gene encodes multiple transcript types, one of those types must be **mRNA** or the function will complain loudly and fail. For mRNAs, the primary transcript is selected according to translated length. For all other transcript types, the length of the transcript feature itself is used. I'd be eager to hear suggestions for alternative selection criteria. Like the `primary_mrna` function, this function **does not** return only transcript features. It **does** modify gene features to ensure that each has at most one transcript feature. >>> reader = tag.GFF3Reader(tag.pkgdata('psyllid-mixed-gene.gff3.gz')) >>> gene_filter = tag.select.features(reader, type='gene') >>> trans_filter = tag.transcript.primary_transcript(gene_filter) >>> for gene in trans_filter: ... assert gene.num_children == 1 In cases where the direct children of a gene feature have heterogenous types, the `primary_mrna` function will only discard mRNA features. This function, however, will discard all direct children of the gene that are not the primary transcript, including non-transcript children. This is a retty subtle distinction, and anecdotal experience suggests that cases in which the distinction actually matters are extremely rare.
def make_geojson(contents):
    """Return a GeoJSON FeatureCollection string from a variety of inputs.

    *contents* may already be a GeoJSON string (returned unchanged), a
    single object exposing ``__geo_interface__``, or an iterable of such
    objects. Anything else raises ValueError.

    Returns
    -------
    GeoJSON string
    """
    if isinstance(contents, six.string_types):
        return contents
    if hasattr(contents, '__geo_interface__'):
        features = [_geo_to_feature(contents)]
    else:
        try:
            items = iter(contents)
        except TypeError:
            raise ValueError('Unknown type for input')
        features = []
        for index, item in enumerate(items):
            if not hasattr(item, '__geo_interface__'):
                raise ValueError('Unknown type at index {0}'.format(index))
            features.append(_geo_to_feature(item))
    return json.dumps({'type': 'FeatureCollection', 'features': features})
Return a GeoJSON string from a variety of inputs. See the documentation for make_url for the possible contents input. Returns ------- GeoJSON string
async def send(self, message_type, message_content, timeout=None):
    """Send a message via the underlying sender and await the response."""
    response = await self._sender.send(
        message_type, message_content, timeout=timeout)
    return response
Sends a message and returns a future for the response.
def missing_datetimes(self, finite_datetimes):
    """Return the datetimes whose corresponding task is not complete.

    Conservative base implementation: instantiates the task for every
    datetime and checks completeness one by one, preserving input order
    (so a sorted input yields a sorted result). Override in subclasses
    to do bulk checks; this per-instance approach may be slow.
    """
    return [d for d in finite_datetimes
            if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow.
def add_view_no_menu(self, baseview, endpoint=None, static_folder=None):
    """Register a view without creating a menu entry.

    :param baseview: A BaseView type class (instantiated here if needed).
    :param endpoint: optional blueprint endpoint override
    :param static_folder: optional static folder for the blueprint
    :return: the (possibly instantiated) view object
    """
    baseview = self._check_and_init(baseview)
    log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, ""))
    if not self._view_exists(baseview):
        baseview.appbuilder = self
        self.baseviews.append(baseview)
        self._process_inner_views()
        if self.app:
            # Only register with Flask when an app is already bound;
            # otherwise registration happens later.
            self.register_blueprint(
                baseview, endpoint=endpoint, static_folder=static_folder
            )
            self._add_permission(baseview)
    else:
        log.warning(LOGMSG_WAR_FAB_VIEW_EXISTS.format(baseview.__class__.__name__))
    return baseview
Add your views without creating a menu. :param baseview: A BaseView type class instantiated.
def winrm_cmd(session, command, flags, **kwargs):
    """Run *command* with *flags* on a Windows box over WinRM.

    :return: the command's integer exit status code
    """
    log.debug('Executing WinRM command: %s %s', command, flags)
    # (Re)establish the transport session before running the command.
    session.protocol.transport.build_session()
    result = session.run_cmd(command, flags)
    return result.status_code
Wrapper for commands to be run against Windows boxes using WinRM.
def view_running_services(self, package: str = '') -> str:
    """Return ``dumpsys activity services`` output for this device.

    :param package: optional package name to narrow the listing
    """
    stdout, _ = self._execute(
        '-s', self.device_sn, 'shell', 'dumpsys', 'activity',
        'services', package)
    return stdout
View running services.
def one(self, filetype, **kwargs):
    """Return one random file of the given type, or None when none exist.

    Parameters
    ----------
    filetype : str
        File type parameter; extra kwargs are forwarded to ``expand``
        and ``any`` (e.g. as_url, refine).

    Returns
    -------
    str or None
        A random entry from the expanded list of full paths.
    """
    expanded_files = self.expand(filetype, **kwargs)
    # NOTE(review): `any` appears to re-evaluate the same file set
    # already produced by `expand` — confirm whether one pass suffices.
    isany = self.any(filetype, **kwargs)
    return choice(expanded_files) if isany else None
Return one random file of the given type Parameters ---------- filetype : str File type parameter. as_url: bool Whether to return SAS urls refine: str Regular expression string to filter the list of files by before random selection Returns ------- one : str Random file selected from the expanded list of full paths on disk.
def results(self):
    """Attach the cleaned `CifData` (and `StructureData`) as outputs.

    The cleaned cif is always recorded, and added to the ``group_cif``
    input group when given. When ``group_structure`` is given, the
    structure must have been placed in the context by the parse step —
    otherwise the exit code stored in the context is returned.
    """
    self.out('cif', self.ctx.cif)
    if 'group_cif' in self.inputs:
        self.inputs.group_cif.add_nodes([self.ctx.cif])
    if 'group_structure' in self.inputs:
        try:
            structure = self.ctx.structure
        except AttributeError:
            # Structure parsing failed earlier; propagate its exit code.
            return self.ctx.exit_code
        else:
            self.inputs.group_structure.add_nodes([structure])
            self.out('structure', structure)
    self.report('workchain finished successfully')
If successfully created, add the cleaned `CifData` and `StructureData` as output nodes to the workchain. The filter and select calculations were successful, so we return the cleaned CifData node. If the `group_cif` was defined in the inputs, the node is added to it. If the structure should have been parsed, verify that it is was put in the context by the `parse_cif_structure` step and add it to the group and outputs, otherwise return the finish status that should correspond to the exit code of the `primitive_structure_from_cif` function.
def convert_geojson_to_shapefile(geojson_path):
    """Convert a geojson file to a shapefile placed next to it.

    Companion files (e.g. .xml, .qml) are not touched.

    :param geojson_path: The path to the geojson file.
    :type geojson_path: basestring
    :returns: True if the shapefile layer was created, False otherwise.
    :rtype: bool
    """
    layer = QgsVectorLayer(geojson_path, 'vector layer', 'ogr')
    if not layer.isValid():
        return False
    shapefile_path = os.path.splitext(geojson_path)[0] + '.shp'
    QgsVectorFileWriter.writeAsVectorFormat(
        layer, shapefile_path, 'utf-8', layer.crs(), 'ESRI Shapefile')
    # writeAsVectorFormat does not signal failure; check the file exists.
    if os.path.exists(shapefile_path):
        return True
    return False
Convert geojson file to shapefile. It will create a necessary file next to the geojson file. It will not affect another files (e.g. .xml, .qml, etc). :param geojson_path: The path to geojson file. :type geojson_path: basestring :returns: True if shapefile layer created, False otherwise. :rtype: bool
def add_tab(self, tab, title='', icon=None):
    """Add *tab* to the main tab widget, make it current and focus it.

    :param tab: Widget to add as a new tab of the main tab widget.
    :param title: Tab title
    :param icon: Tab icon
    """
    if icon:
        tab._icon = icon
    # Track clone/original relationships (used when tabs are split/cloned).
    if not hasattr(tab, 'clones'):
        tab.clones = []
    if not hasattr(tab, 'original'):
        tab.original = None
    if icon:
        self.main_tab_widget.addTab(tab, icon, title)
    else:
        self.main_tab_widget.addTab(tab, title)
    self.main_tab_widget.setCurrentIndex(
        self.main_tab_widget.indexOf(tab))
    self.main_tab_widget.show()
    tab._uuid = self._uuid
    # Reset horizontal scroll for editor-like widgets that have one.
    try:
        scroll_bar = tab.horizontalScrollBar()
    except AttributeError:
        pass
    else:
        scroll_bar.setValue(0)
    tab.setFocus()
    tab._original_tab_widget = self
    self._tabs.append(tab)
    self._on_focus_changed(None, tab)
Adds a tab to main tab widget. :param tab: Widget to add as a new tab of the main tab widget. :param title: Tab title :param icon: Tab icon
def parse_source(info):
    """Derive a human-readable source name from a youtube-dl info dict.

    Args:
        info (dict): The info dict to parse.

    Returns:
        source (str): mapped source name, the URL's network location for
        generic extractors, or "Unknown".
    """
    if "extractor_key" in info:
        source = info["extractor_key"]
        lower_source = source.lower()
        for key in SOURCE_TO_NAME:
            if lower_source == key.lower():
                # Index with the actual key: the mapping's keys are not
                # guaranteed to be lowercase, so indexing with the
                # lowered key could raise KeyError.
                source = SOURCE_TO_NAME[key]
        if source != "Generic":
            return source
    if "url" in info and info["url"] is not None:
        p = urlparse(info["url"])
        if p and p.netloc:
            return p.netloc
    return "Unknown"
Parses the source info from an info dict generated by youtube-dl Args: info (dict): The info dict to parse Returns: source (str): The source of this song
def historical_pandas_yahoo(symbol, source='yahoo', start=None, end=None):
    """Fetch historical quotes for *symbol* via pandas ``DataReader``.

    :param symbol: ticker symbol
    :param source: data source name (defaults to yahoo! finance)
    :param start: optional start date, passed through
    :param end: optional end date, passed through
    """
    return DataReader(symbol, source, start=start, end=end)
Fetch from yahoo! finance historical quotes
def _build_web_client(cls, session: AppSession):
    """Build the Web Client from the session's factory and arguments.

    Wires together the cookie jar, HTTP client, request factory and a
    redirect-tracker factory pre-bound with the configured maximum
    redirect count.
    """
    cookie_jar = cls._build_cookie_jar(session)
    http_client = cls._build_http_client(session)
    redirect_factory = functools.partial(
        session.factory.class_map['RedirectTracker'],
        max_redirects=session.args.max_redirect
    )
    return session.factory.new(
        'WebClient',
        http_client,
        redirect_tracker_factory=redirect_factory,
        cookie_jar=cookie_jar,
        request_factory=cls._build_request_factory(session),
    )
Build Web Client.
def expect_equal(first, second, msg=None, extras=None):
    """Expect two objects to be equal; record a failure instead of raising.

    If the expectation is not met the test is marked as failed after its
    execution finishes (the signal is recorded, not propagated).

    Args:
        first: The first object to compare.
        second: The second object to compare.
        msg: A string that adds additional info about the failure.
        extras: An optional field for extra information to be included in
            the test result.
    """
    try:
        asserts.assert_equal(first, second, msg, extras)
    except signals.TestSignal as e:
        logging.exception('Expected %s equals to %s, but they are not.', first, second)
        recorder.add_error(e)
Expects the equality of objects, otherwise fail the test. If the expectation is not met, the test is marked as fail after its execution finishes. Error message is "first != second" by default. Additional explanation can be supplied in the message. Args: first: The first object to compare. second: The second object to compare. msg: A string that adds additional info about the failure. extras: An optional field for extra information to be included in test result.
def find_field(browser, field, value):
    """Locate input fields matching *value*.

    Tries, in order: the element id, the element name, then an associated
    label; the results of the three lookups are concatenated.
    """
    return find_field_by_id(browser, field, value) + \
        find_field_by_name(browser, field, value) + \
        find_field_by_label(browser, field, value)
Locate an input field of a given value This first looks for the value as the id of the element, then the name of the element, then a label for the element.
def scipy_psd(x, f_sample=1.0, nr_segments=4):
    """Welch power-spectral-density estimate of *x* (scipy reference).

    Used to compare our own numpy result against scipy's implementation.

    :param x: input signal (1-D array-like)
    :param f_sample: sampling frequency of *x*
    :param nr_segments: number of segments to split *x* into for Welch
    :return: tuple of (frequency axis, PSD values)
    """
    # Integer division: len(x)/nr_segments is a float on Python 3 and
    # scipy requires an integer nperseg.
    f_axis, psd_of_x = scipy.signal.welch(
        x, f_sample, nperseg=len(x) // nr_segments)
    return f_axis, psd_of_x
PSD routine from scipy, so we can compare our own numpy result against this one
def glob_config(pattern, *search_dirs):
    """Yield glob matches for *pattern* in all configuration locations.

    Note: the configuration "base" directory is not checked when the
    pattern includes a subdirectory. This is done for performance since
    this is usually used to find *all* configs for a certain component.
    """
    candidates = config_search_paths(pattern, *search_dirs, check_exists=False)
    for candidate in candidates:  # renamed to avoid shadowing `pattern`
        for match in glob.iglob(candidate):
            yield match
Return glob results for all possible configuration locations. Note: This method does not check the configuration "base" directory if the pattern includes a subdirectory. This is done for performance since this is usually used to find *all* configs for a certain component.
def show(self, msg, indent=0, style="", **kwargs):
    """Print *msg* to the console with indentation and style applied.

    No-op unless ``self.enable_verbose`` is true; extra keyword
    arguments are forwarded to ``print``.
    """
    if not self.enable_verbose:
        return
    rendered = self.MessageTemplate.with_style.format(
        indent=self.tab * indent,
        style=style,
        msg=msg,
    )
    print(rendered, **kwargs)
Print message to console, indent format may apply.
def call(self, task, decorators=None):
    """Call the given task on the service layer.

    :param task: task to be called; it is first decorated with any
        TaskDecorators contained in *decorators*
    :type task: instance of Task class
    :param decorators: list of TaskDecorator / TaskResultDecorator
        inherited classes
    :type decorators: list
    :return: result of the task call wrapped in RawTaskResult and
        decorated with the TaskResultDecorators in *decorators*
    :rtype: TaskResult instance
    """
    if decorators is None:
        decorators = []
    task = self.apply_task_decorators(task, decorators)
    data = task.get_data()
    name = task.get_name()
    result = self._inner_call(name, data)
    task_result = RawTaskResult(task, result)
    return self.apply_task_result_decorators(task_result, decorators)
Call given task on service layer. :param task: task to be called. task will be decorated with TaskDecorator's contained in 'decorators' list :type task: instance of Task class :param decorators: list of TaskDecorator's / TaskResultDecorator's inherited classes :type decorators: list :return task_result: result of task call decorated with TaskResultDecorator's contained in 'decorators' list :rtype TaskResult instance
def warning(message, css_path=CSS_PATH):
    """Render *message* into the warning template for the rich text view.

    :param message: text inserted into the template
    :param css_path: stylesheet path passed through to the template
    :return: rendered HTML string
    """
    env = Environment()
    env.loader = FileSystemLoader(osp.join(CONFDIR_PATH, 'templates'))
    warning = env.get_template("warning.html")
    return warning.render(css_path=css_path, text=message)
Print a warning message on the rich text view
def save(self, force=False):
    """Save the configuration as JSON in the standard config location.

    Args:
        force (Optional[:obj:`bool`]): Continue writing even if the
            original config file was not loaded properly. Dangerous:
            previous configuration options could be lost. Defaults to
            False.

    Raises:
        :obj:`ConfigError`: if the configuration file was not loaded
            successfully on initialization and *force* is False.
    """
    if (not self._success) and (not force):
        raise ConfigError((
            'The config file appears to be corrupted:\n\n'
            ' {fname}\n\n'
            'Before attempting to save the configuration, please either '
            'fix the config file manually, or overwrite it with a blank '
            'configuration as follows:\n\n'
            ' from dustmaps.config import config\n'
            ' config.reset()\n\n'
        ).format(fname=self.fname))
    with open(self.fname, 'w') as f:
        json.dump(self._options, f, indent=2)
Saves the configuration to a JSON, in the standard config location. Args: force (Optional[:obj:`bool`]): Continue writing, even if the original config file was not loaded properly. This is dangerous, because it could cause the previous configuration options to be lost. Defaults to :obj:`False`. Raises: :obj:`ConfigError`: if the configuration file was not successfully loaded on initialization of the class, and :obj:`force` is :obj:`False`.
def set_categories(self):
    """Parse <category> elements from the feed soup into self.categories."""
    self.categories = [
        element.string for element in self.soup.findAll('category')
    ]
Parses and set feed categories
def list_parameter_ranges(self, parameter, start=None, stop=None, min_gap=None, max_gap=None, parameter_cache='realtime'):
    """Return value ranges of *parameter* between *start* and *stop*.

    Each range is an interval during which the parameter's value was
    uninterrupted and unchanged.

    :param str parameter: fully-qualified XTCE name or ``NAMESPACE/NAME``
        alias
    :param ~datetime.datetime start: minimum generation time (inclusive)
    :param ~datetime.datetime stop: maximum generation time (exclusive)
    :param float min_gap: gaps (seconds) smaller than this are ignored
    :param float max_gap: distances (seconds) larger than this introduce
        an artificial gap
    :param str parameter_cache: processor whose parameter cache is merged
        with archived values; ``None`` disables cache results
    :rtype: .ParameterRange[]
    """
    path = '/archive/{}/parameters{}/ranges'.format(
        self._instance, parameter)
    params = {}
    if start is not None:
        params['start'] = to_isostring(start)
    if stop is not None:
        params['stop'] = to_isostring(stop)
    # The API expects gap values in milliseconds.
    if min_gap is not None:
        params['minGap'] = int(min_gap * 1000)
    if max_gap is not None:
        params['maxGap'] = int(max_gap * 1000)
    if parameter_cache:
        params['processor'] = parameter_cache
    else:
        params['norealtime'] = True
    response = self._client.get_proto(path=path, params=params)
    message = pvalue_pb2.Ranges()
    message.ParseFromString(response.content)
    ranges = getattr(message, 'range')
    return [ParameterRange(r) for r in ranges]
Returns parameter ranges between the specified start and stop time. Each range indicates an interval during which this parameter's value was uninterrupted and unchanged. Ranges are a good fit for retrieving the value of a parameter that does not change frequently. For example an on/off indicator or some operational status. Querying ranges will then induce much less overhead than manually processing the output of :meth:`list_parameter_values` would. The maximum number of returned ranges is limited to 500. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the considered values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the considered values (exclusive) :param float min_gap: Time in seconds. Any gap (detected based on parameter expiration) smaller than this will be ignored. However if the parameter changes value, the ranges will still be split. :param float max_gap: Time in seconds. If the distance between two subsequent parameter values is bigger than this value (but smaller than the parameter expiration), then an artificial gap is created. This also applies if there is no expiration defined for the parameter. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :rtype: .ParameterRange[]
def could_collide_hor(self, vpos, adsb_pkt):
    """Return True if the vehicle could come within ``filter_dist_xy``
    meters of the ADS-B vehicle within ``filter_time`` seconds.

    Worst case is assumed: both vehicles close at full horizontal speed,
    so each one's speed times the timeout is subtracted from the current
    great-circle distance before comparing with the margin.
    """
    margin = self.asterix_settings.filter_dist_xy
    timeout = self.asterix_settings.filter_time
    # ADS-B packet encodes lat/lon in 1e-7 degrees and speed in cm/s.
    alat = adsb_pkt.lat * 1.0e-7
    alon = adsb_pkt.lon * 1.0e-7
    avel = adsb_pkt.hor_velocity * 0.01
    vvel = sqrt(vpos.vx**2 + vpos.vy**2)
    dist = mp_util.gps_distance(vpos.lat, vpos.lon, alat, alon)
    dist -= avel * timeout
    dist -= vvel * timeout
    if dist <= margin:
        return True
    return False
return true if vehicle could come within filter_dist_xy meters of adsb vehicle in timeout seconds
def sort(self, key, by=None, external=None, offset=0, limit=None, order=None, alpha=False, store_as=None):
    """Return or store the sorted elements of the list/set/sorted-set
    at *key*.

    Sorting is numeric by default; ``alpha`` switches to lexicographic.

    :param key: the key holding the collection to sort
    :param by: optional pattern for external sorting keys
    :param external: GET pattern (or list of patterns) for retrieving
        external keys
    :param int offset: starting offset, only applied together with limit
    :param int limit: number of elements to return
    :param order: sort order, one of ``ASC`` or ``DESC``
    :param bool alpha: sort the results lexicographically
    :param store_as: when given, store the result under this key instead
        of returning it
    :rtype: list|int
    :raises ValueError: on an invalid sort order
    """
    if order and order not in [b'ASC', b'DESC', 'ASC', 'DESC']:
        raise ValueError('invalid sort order "{}"'.format(order))
    command = [b'SORT', key]
    if by:
        command += [b'BY', by]
    if external and isinstance(external, list):
        for entry in external:
            command += [b'GET', entry]
    elif external:
        command += [b'GET', external]
    if limit:
        # Redis LIMIT takes offset and count together.
        command += [
            b'LIMIT',
            ascii(offset).encode('utf-8'),
            ascii(limit).encode('utf-8')
        ]
    if order:
        command.append(order)
    if alpha is True:
        command.append(b'ALPHA')
    if store_as:
        command += [b'STORE', store_as]
    return self._execute(command)
Returns or stores the elements contained in the list, set or sorted set at key. By default, sorting is numeric and elements are compared by their value interpreted as double precision floating point number. The ``external`` parameter is used to specify the `GET <http://redis.io/commands/sort#retrieving-external-keys>_` parameter for retrieving external keys. It can be a single string or a list of strings. .. note:: **Time complexity**: ``O(N+M*log(M))`` where ``N`` is the number of elements in the list or set to sort, and ``M`` the number of returned elements. When the elements are not sorted, complexity is currently ``O(N)`` as there is a copy step that will be avoided in next releases. :param key: The key to get the refcount for :type key: :class:`str`, :class:`bytes` :param by: The optional pattern for external sorting keys :type by: :class:`str`, :class:`bytes` :param external: Pattern or list of patterns to return external keys :type external: :class:`str`, :class:`bytes`, list :param int offset: The starting offset when using limit :param int limit: The number of elements to return :param order: The sort order - one of ``ASC`` or ``DESC`` :type order: :class:`str`, :class:`bytes` :param bool alpha: Sort the results lexicographically :param store_as: When specified, the key to store the results as :type store_as: :class:`str`, :class:`bytes`, None :rtype: list|int :raises: :exc:`~tredis.exceptions.RedisError` :raises: :exc:`ValueError`
def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args): if present: if not expected: raise exc_unexpected(*exc_args) elif expected and expected is not Argument.ignore: raise exc_missing(*exc_args)
Checks for the presence of an extra to the argument list. Raises expections if this is unexpected or if it is missing and expected.
def is_selected(self, model):
    """Check whether *model* is part of the current selection.

    Passing ``None`` asks whether the selection is empty.

    :param model: the model to look up, or None
    :return: True if the model is within the selection, False else
    :rtype: bool
    """
    if model is None:
        return not self._selected
    return model in self._selected
Checks whether the given model is selected :param model: :return: True if the model is within the selection, False else :rtype: bool
def _locate_point(nodes, point):
    r"""Locate a point on a curve.

    Recursively subdivides the curve, keeping only sub-curves whose
    bounding boxes contain ``point``; once subdivision finishes, the
    surviving parameter intervals are averaged and refined with Newton's
    method. Assumes (without checking) that ``point`` is ``D x 1`` where
    ``D`` is the curve's dimension.

    Args:
        nodes (numpy.ndarray): Nodes defining a Bezier curve.
        point (numpy.ndarray): The point to locate.

    Returns:
        Optional[float]: Parameter value ``s`` corresponding to ``point``,
        or None if the point is not on the curve.

    Raises:
        ValueError: If the standard deviation of the remaining start/end
        parameters exceeds ``_LOCATE_STD_CAP``.
    """
    candidates = [(0.0, 1.0, nodes)]
    for _ in six.moves.xrange(_MAX_LOCATE_SUBDIVISIONS + 1):
        next_candidates = []
        for start, end, candidate in candidates:
            if _helpers.contains_nd(candidate, point.ravel(order="F")):
                midpoint = 0.5 * (start + end)
                left, right = subdivide_nodes(candidate)
                next_candidates.extend(
                    ((start, midpoint, left), (midpoint, end, right))
                )
        candidates = next_candidates
    if not candidates:
        return None
    params = [(start, end) for start, end, _ in candidates]
    if np.std(params) > _LOCATE_STD_CAP:
        raise ValueError("Parameters not close enough to one another", params)
    s_approx = np.mean(params)
    s_approx = newton_refine(nodes, point, s_approx)
    # Clamp the refined parameter into [0, 1].
    if s_approx < 0.0:
        return 0.0
    elif s_approx > 1.0:
        return 1.0
    else:
        return s_approx
r"""Locate a point on a curve. Does so by recursively subdividing the curve and rejecting sub-curves with bounding boxes that don't contain the point. After the sub-curves are sufficiently small, uses Newton's method to zoom in on the parameter value. .. note:: This assumes, but does not check, that ``point`` is ``D x 1``, where ``D`` is the dimension that ``curve`` is in. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve. point (numpy.ndarray): The point to locate. Returns: Optional[float]: The parameter value (:math:`s`) corresponding to ``point`` or :data:`None` if the point is not on the ``curve``. Raises: ValueError: If the standard deviation of the remaining start / end parameters among the subdivided intervals exceeds a given threshold (e.g. :math:`2^{-20}`).
def _calc_order(self, order): if order is not None and order != '': self.order = order.upper() else: shape = self.shape if len(shape) <= 2: self.order = 'M' else: depth = shape[-1] if depth == 1: self.order = 'M' elif depth == 2: self.order = 'AM' elif depth == 3: self.order = 'RGB' elif depth == 4: self.order = 'RGBA'
Called to set the order of a multi-channel image. The order should be determined by the loader, but this will make a best guess if passed `order` is `None`.
def encode(self, boundary):
    """Return the string encoding of this parameter for *boundary*.

    Uses the in-memory value when set, otherwise reads the file object.
    Raises ValueError if the boundary occurs inside the payload.
    """
    payload = self.value if self.value is not None else self.fileobj.read()
    if re.search("^--%s$" % re.escape(boundary), payload, re.M):
        raise ValueError("boundary found in encoded string")
    return "%s%s\r\n" % (self.encode_hdr(boundary), payload)
Returns the string encoding of this parameter
def _writeCloseFrame(self, reason=DISCONNECT.GO_AWAY):
    """Write a close frame with *reason* and tear the connection down.

    Closes the transport and drops the reference so no further writes
    can happen on this connection.
    """
    self.transport.writeClose(reason)
    self.transport.loseConnection()
    self.transport = None
Write a close frame with the given reason and schedule this connection close.
def check_paths(self, paths, update=None):
    """Ensure each named path is exported in ``os.environ`` (upper-cased).

    Parameters:
        paths (OrderedDict): name -> path entries from a config section
        update (bool): if True, overwrite existing environment variables
            unless the name is listed in ``self.exclude``. Default is
            None/False.
    """
    # Normalize self.exclude to a list: it may be falsy, a single name,
    # or already a list.
    exclude = [] if not self.exclude else self.exclude \
        if isinstance(self.exclude, list) else [self.exclude]
    for pathname, path in paths.items():
        if update and pathname.upper() not in exclude:
            os.environ[pathname.upper()] = os.path.normpath(path)
        elif pathname.upper() not in os.environ:
            # Without update, only set variables that are not present yet.
            os.environ[pathname.upper()] = os.path.normpath(path)
Check if the path is in the os environ, and if not add it Paramters: paths (OrderedDict): An ordered dict containing all of the paths from the a given section, as key:val = name:path update (bool): If True, overwrites existing tree environment variables in your local environment. Default is False.
def benchmark(store, n=10000):
    """Increment an ``UpdatableItem``'s integer count ``n`` times.

    Args:
        store: backing store handed to ``UpdatableItem``.
        n (int): number of increments to perform.
    """
    # ``range`` works on both Python 2 and 3; ``xrange`` raises
    # NameError on Python 3.
    item = UpdatableItem(store=store, count=0)
    for _ in range(n):
        item.count += 1
Increments an integer count n times.
def fullData(master):
    """Report how each configured builder's steps are arranged.

    Only step names are collected -- no commands, server names or other
    sensitive detail.
    """
    builders = [
        [getName(step) for step in builder.factory.steps]
        for builder in master.config.builders
    ]
    return {'builders': builders}
Send the actual configuration of the builders, and how their steps are
arranged. Note that full data will never send actual detail of what
command is run, name of servers, etc.
def read(cls, *criteria, **kwargs):
    """Filter-query helper that applies soft-delete logic.

    :param criteria: where-clause conditions
    :param kwargs: pass removed=True to include soft-deleted rows
    :return: query over matching rows
    """
    if kwargs.get('removed', False):
        return cls.query.filter(*criteria)
    # Default: hide soft-deleted rows (those with time_removed set).
    return cls.query.filter(cls.time_removed == 0, *criteria)
filter query helper that handles soft delete logic. If your query conditions do not require expressions, consider using read_by. :param criteria: where clause conditions :param kwargs: set removed=True if you want soft-deleted rows :return: row object generator
def function_call_action(self, text, loc, fun):
    """Semantic action executed after the whole function call is recognised.

    Validates the argument count, emits the call, restores the enclosing
    call context, and returns the register holding the return value.

    Raises:
        SemanticException: if the number of supplied arguments does not
            match the function's declared count.
    """
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("FUN_CALL:",fun)
        if DEBUG == 2: self.symtab.display()
        if DEBUG > 2: return
    # The expected argument count is stored as the function symbol's
    # attribute in the symbol table.
    if len(self.function_arguments) != self.symtab.get_attribute(self.function_call_index):
        raise SemanticException("Wrong number of arguments for function '%s'" % fun.name)
    # Arguments were collected in reverse order during parsing.
    self.function_arguments.reverse()
    self.codegen.function_call(self.function_call_index, self.function_arguments)
    self.codegen.restore_used_registers()
    return_type = self.symtab.get_type(self.function_call_index)
    # Pop the saved context of the enclosing call (calls can nest).
    self.function_call_index = self.function_call_stack.pop()
    self.function_arguments = self.function_arguments_stack.pop()
    # Move the function's return value into a freshly taken register.
    register = self.codegen.take_register(return_type)
    self.codegen.move(self.codegen.take_function_register(return_type), register)
    return register
Code executed after recognising the whole function call
def get_response(self, results):
    """Return the first ``BaseResponse`` found in ``results``.

    :param results: iterable of iterables of candidates, or None
    :return: the first ``BaseResponse`` instance, or None
    """
    if results is None:
        return None
    for candidate in itertools.chain.from_iterable(results):
        if isinstance(candidate, BaseResponse):
            return candidate
Get response object from results. :param results: list :return:
def close(self, terminate=True, kill=False):
    """Close the communication with the terminal's child.

    If ``terminate`` is True additionally try to terminate the child,
    and if ``kill`` is also True, kill it when termination fails.

    Raises:
        TerminalException: if the child could not be terminated.
    """
    if self.closed:
        return
    # Release both child file descriptors, if still open.
    for attr in ('child_fd', 'child_fde'):
        fd = getattr(self, attr)
        if fd is not None:
            os.close(fd)
            setattr(self, attr, None)
    # Give the child a moment to notice the closed descriptors.
    time.sleep(0.1)
    if terminate and not self.terminate(kill):
        raise TerminalException('Failed to terminate child process.')
    self.closed = True
Close the communication with the terminal's child. If ``terminate`` is ``True`` then additionally try to terminate the terminal, and if ``kill`` is also ``True``, kill the terminal if terminating it was not enough.
def load_data_file(
        file_path,
        file_path_is_relative=False,
        comment_string=DATA_FILE_COMMENT,
        field_separator=DATA_FILE_FIELD_SEPARATOR,
        line_format=None
):
    """Load a data file with one record per line and fields separated by
    ``field_separator``, returning a list of tuples.

    Empty lines and lines starting with ``comment_string`` are ignored.
    When ``line_format`` is given, each line must have exactly
    ``len(line_format)`` fields, and each tuple is converted with
    ``convert_raw_tuple``.

    :param str file_path: path of the data file to load
    :param bool file_path_is_relative: if ``True``, ``file_path`` is
        relative to this source code file
    :param str comment_string: ignore lines starting with this string
    :param str field_separator: fields are separated by this string
    :param str line_format: per-field conversion spec (``s``/``S``/``i``/
        ``x``/``U``/``A``), or ``None`` to return raw tuples
    :rtype: list of tuples
    :raises ValueError: if a line has the wrong number of fields
    """
    if file_path_is_relative:
        file_path = os.path.join(os.path.dirname(__file__), file_path)
    # BUGFIX: the previous version called len(line_format) on every data
    # line even when line_format was None (the default), raising
    # TypeError on the first record. Only validate when a format with at
    # least one field was supplied.
    expected_fields = len(line_format) if line_format else None
    raw_tuples = []
    with io.open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if (len(line) == 0) or line.startswith(comment_string):
                continue
            raw_list = line.split(field_separator)
            if (expected_fields is not None) and (len(raw_list) != expected_fields):
                raise ValueError("Data file '%s' contains a bad line: '%s'" % (file_path, line))
            raw_tuples.append(tuple(raw_list))
    if expected_fields is None:
        return raw_tuples
    return [convert_raw_tuple(t, line_format) for t in raw_tuples]
Load a data file, with one record per line and fields separated by
``field_separator``, returning a list of tuples.

It ignores lines starting with ``comment_string`` or empty lines.

If ``line_format`` is not ``None``, check that each line (tuple) has the
prescribed number of values.

:param str file_path: path of the data file to load
:param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file
:param str comment_string: ignore lines starting with this string
:param str field_separator: fields are separated by this string
:param str line_format: if not ``None``, parses each line according to the given format
    (``s`` = string, ``S`` = split string using spaces, ``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII)
:rtype: list of tuples
def _get_info(self, host, port, unix_socket, auth):
    """Return the INFO dict from the specified Redis instance.

    :param str host: redis host
    :param int port: redis port
    :rtype: dict or None
    """
    connection = self._client(host, port, unix_socket, auth)
    if connection is None:
        return None
    info = connection.info()
    # Drop the client reference promptly; only one call was needed.
    del connection
    return info
Return info dict from specified Redis instance :param str host: redis host :param int port: redis port :rtype: dict
def _rule_as_string(self, rule):
    """Convert a tinycss rule object to a formatted CSS string.

    Handles RuleSet, ImportRule, FontFaceRule, MediaRule (recursing into
    its nested rules) and PageRule; any other rule type yields ''.
    """
    if isinstance(rule, RuleSet):
        return '%s{%s}' % (
            self._selector_as_string(rule.selector),
            self._declarations_as_string(rule.declarations))
    elif isinstance(rule, ImportRule):
        return "@import url('%s') %s;" % (
            rule.uri, ','.join(rule.media))
    elif isinstance(rule, FontFaceRule):
        return "@font-face{%s}" % self._declarations_as_string(rule.declarations)
    elif isinstance(rule, MediaRule):
        return "@media %s{%s}" % (
            ','.join(rule.media),
            ''.join(self._rule_as_string(r) for r in rule.rules))
    elif isinstance(rule, PageRule):
        # rule.selector is a (selector, pseudo-class) pair; each part is
        # optional and only emitted when present.
        selector, pseudo = rule.selector
        return "@page%s%s{%s}" % (
            ' %s' % selector if selector else '',
            ' :%s' % pseudo if pseudo else '',
            self._declarations_as_string(rule.declarations))
    return ''
Converts a tinycss rule to a formatted CSS string :param rule: The rule to format :type rule: tinycss Rule object :returns: The Rule as a CSS string :rtype: str
def do_batch_status(args):
    """Run the batch-status command, printing output to the console.

    Args:
        args: the parsed arguments sent to the command at runtime.
    """
    client = RestClient(args.url, args.user)
    ids = args.batch_ids.split(',')
    if args.wait and args.wait > 0:
        statuses = client.get_statuses(ids, args.wait)
    else:
        statuses = client.get_statuses(ids)
    if args.format == 'yaml':
        fmt.print_yaml(statuses)
    elif args.format == 'json':
        fmt.print_json(statuses)
    else:
        raise AssertionError('Missing handler: {}'.format(args.format))
Runs the batch-status command, printing output to the console Args: args: The parsed arguments sent to the command at runtime
def process_openxml_file(filename: str,
                         print_good: bool,
                         delete_if_bad: bool) -> None:
    """Print the filename of, or delete, an OpenXML file depending on
    whether it appears corrupt.

    Args:
        filename: filename to check
        print_good: print the filename if the file appears good;
            otherwise print it if the file appears corrupt
        delete_if_bad: delete the file if it appears corrupt
    """
    try:
        good = is_openxml_good(filename)
        # Print when the verdict matches the side we were asked to report.
        if good == print_good:
            print(filename)
        if delete_if_bad and not good:
            log.warning("Deleting: {}", filename)
            os.remove(filename)
    except Exception as e:
        # This may run in a worker subprocess: surface the error loudly
        # before re-raising.
        log.critical("Uncaught error in subprocess: {!r}\n{}",
                     e, traceback.format_exc())
        raise
Prints the filename of, or deletes, an OpenXML file depending on whether it is corrupt or not. Args: filename: filename to check print_good: if ``True``, then prints the filename if the file appears good. delete_if_bad: if ``True``, then deletes the file if the file appears corrupt.
def zpopmin(self, key, count=None, *, encoding=_NOTSET):
    """Remove and return up to ``count`` members with the lowest scores
    in the sorted set stored at ``key``.

    :raises TypeError: if count is not int
    """
    if count is not None and not isinstance(count, int):
        raise TypeError("count argument must be int")
    # ZPOPMIN takes the count argument only when one was supplied;
    # building an args list just to hold it was needlessly indirect.
    if count is None:
        return self.execute(b'ZPOPMIN', key, encoding=encoding)
    return self.execute(b'ZPOPMIN', key, count, encoding=encoding)
Removes and returns up to count members with the lowest scores in the sorted set stored at key. :raises TypeError: if count is not int
def _get_http_args(self, params):
    """Return a per-request copy of ``self.http_args``.

    Adds auth headers and 'source_id' to the headers and merges
    ``params`` into the query params.

    BUGFIX: ``self.http_args.copy()`` is shallow, so the previous code's
    in-place ``update`` of the nested 'headers' and 'params' dicts
    leaked request-specific values back into ``self.http_args``. The
    nested dicts are now copied before being modified.
    """
    http_args = self.http_args.copy()
    headers = dict(http_args.get('headers', {}))
    if self.auth is not None:
        headers.update(self.auth.get_headers())
    if self._source_id is not None:
        headers['source_id'] = self._source_id
    http_args['headers'] = headers
    merged_params = dict(http_args.get('params', {}))
    merged_params.update(params)
    http_args['params'] = merged_params
    return http_args
Return a copy of the http_args Adds auth headers and 'source_id', merges in params.
def delete_mirror(name, config_path=_DEFAULT_CONFIG_PATH, force=False):
    """Remove a mirrored remote repository. Package data is not removed.

    :param str name: The name of the remote repository mirror.
    :param str config_path: The path to the aptly configuration file.
    :param bool force: Remove the mirror even if it is used as the
        source of an existing snapshot.
    :return: True if the mirror is absent afterwards.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' aptly.delete_mirror name="test-mirror"
    """
    _validate_config(config_path)
    if not __salt__['aptly.get_mirror'](name=name, config_path=config_path):
        log.debug('Mirror already absent: %s', name)
        return True
    # aptly expects the boolean flag as a lowercase string.
    force_flag = six.text_type(bool(force)).lower()
    _cmd_run(['mirror', 'drop',
              '-config={}'.format(config_path),
              '-force={}'.format(force_flag),
              name])
    # Verify the drop actually took effect.
    if __salt__['aptly.get_mirror'](name=name, config_path=config_path):
        log.error('Unable to remove mirror: %s', name)
        return False
    log.debug('Removed mirror: %s', name)
    return True
Remove a mirrored remote repository. By default, Package data is not removed. :param str name: The name of the remote repository mirror. :param str config_path: The path to the configuration file for the aptly instance. :param bool force: Whether to remove the mirror even if it is used as the source of an existing snapshot. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' aptly.delete_mirror name="test-mirror"
def to_carrier(self, span_context, carrier):
    """Inject the SpanContext fields into the carrier dict.

    :param span_context: SpanContext object to serialize.
    :param carrier: dict receiving the trace_id, span_id and options.
    :returns: the carrier dict.
    """
    carrier[_TRACE_ID_KEY] = str(span_context.trace_id)
    span_id = span_context.span_id
    if span_id is not None:
        carrier[_SPAN_ID_KEY] = str(span_id)
    options_byte = span_context.trace_options.trace_options_byte
    carrier[_TRACE_OPTIONS_KEY] = str(options_byte)
    return carrier
Inject the SpanContext fields to carrier dict. :type span_context: :class:`~opencensus.trace.span_context.SpanContext` :param span_context: SpanContext object. :type carrier: dict :param carrier: The carrier which holds the trace_id, span_id, options information from a SpanContext. :rtype: dict :returns: The carrier which holds the span context information.
def get_default_classes(self):
    """Return the default CSS classes for the tab group.

    Extends the parent class's defaults with ``CSS_TAB_GROUP_CLASSES``.
    """
    default_classes = super(TabGroup, self).get_default_classes()
    default_classes.extend(CSS_TAB_GROUP_CLASSES)
    return default_classes
Returns a list of the default classes for the tab group. Defaults to ``["nav", "nav-tabs", "ajax-tabs"]``.
def getcoords(ddtt):
    """Return the coordinates of the surface as a list of 3-tuples."""
    # Vertex data starts right after the 'Number_of_Vertices' field.
    first_coord = ddtt.objls.index('Number_of_Vertices') + 1
    flat_points = ddtt.obj[first_coord:]
    return list(grouper(3, flat_points))
return the coordinates of the surface
def _set_last_aid(func):
    """Decorator that records the wrapped method's return value in
    ``self.last_aid`` before returning it."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        self.last_aid = result
        return result
    return wrapper
Decorator for setting last_aid.
def list(self):
    """Yield collection item UIDs with the content suffix appended.

    Fake collections yield nothing.
    """
    if self.is_fake:
        return
    suffix = self.content_suffix
    for item in self.collection.list():
        yield item.uid + suffix
List collection items.
def FormatDescriptorToPython(i):
    """Format a descriptor into a form which can be used as a python
    attribute.

    example::

        >>> FormatDescriptorToPython('(Ljava/lang/Long; Ljava/lang/Long; Z Z)V')
        'Ljava_lang_LongLjava_lang_LongZZV'

    :param i: name to transform
    :rtype: str
    """
    # One C-level pass instead of eight chained .replace() calls:
    # '/' maps to '_', the separator characters ';[()$ ' are dropped.
    return i.translate(str.maketrans('/', '_', ';[()$ '))
Format a descriptor into a form which can be used as a python attribute

example::

    >>> FormatDescriptorToPython('(Ljava/lang/Long; Ljava/lang/Long; Z Z)V')
    'Ljava_lang_LongLjava_lang_LongZZV'

:param i: name to transform
:rtype: str
def get_person_by_regid(self, regid):
    """Return a restclients.Person object for the given regid.

    Raises InvalidRegID for a malformed regid, and DataFailureException
    if PWS does not answer with HTTP 200.
    """
    if not self.valid_uwregid(regid):
        raise InvalidRegID(regid)
    url = "{}/{}/full.json".format(PERSON_PREFIX, regid.upper())
    response = DAO.getURL(url, {"Accept": "application/json"})
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    return self._person_from_json(response.data)
Returns a restclients.Person object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
def tracefunc_xml(func):
    """Decorator causing the wrapped call's output to be printed inside
    an XML-style block.

    Opens a ``<funcname>`` tag before the call, indents any output
    produced while it runs, and closes the tag afterwards. Printing is
    skipped when the call is made with ``verbose=False``.
    """
    funcname = meta_util_six.get_funcname(func)
    def wrp_tracefunc2(*args, **kwargs):
        verbose = kwargs.get('verbose', True)
        if verbose:
            print('<%s>' % (funcname,))
        with util_print.Indenter('    '):
            ret = func(*args, **kwargs)
        if verbose:
            print('</%s>' % (funcname,))
        return ret
    # Strip decorator frames from tracebacks, then restore the original
    # signature/metadata on the wrapper.
    wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2)
    wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func)
    return wrp_tracefunc2_
Causes output of function to be printed in an XML style block
def pageLeft(self):
    """Redraw the sheet one screen-width to the left.

    Keeps the column cursor in the same relative position: the previous
    leftmost column becomes the rightmost, with ``leftVisibleColIndex``
    and ``cursorVisibleColIndex`` moving in tandem.
    """
    # Scroll left until the old leftmost column becomes the rightmost
    # visible one (stopping at the first non-key column).
    targetIdx = self.leftVisibleColIndex
    firstNonKeyVisibleColIndex = self.visibleCols.index(self.nonKeyVisibleCols[0])
    while self.rightVisibleColIndex != targetIdx and self.leftVisibleColIndex > firstNonKeyVisibleColIndex:
        self.cursorVisibleColIndex -= 1
        self.leftVisibleColIndex -= 1
        self.calcColLayout()  # recompute rightVisibleColIndex
    # If the last column is now visible, keep scrolling left while it
    # still fully fits, then step back one column once it gets cut off.
    if self.rightVisibleColIndex == self.nVisibleCols-1:
        while self.leftVisibleColIndex > 0:
            rightcol = self.visibleCols[self.rightVisibleColIndex]
            # visibleColLayout[...][1] appears to be the width allocated
            # to that column on screen -- TODO confirm.
            if rightcol.width > self.visibleColLayout[self.rightVisibleColIndex][1]:
                # Rightmost column is truncated: undo one step and stop.
                self.cursorVisibleColIndex += 1
                self.leftVisibleColIndex += 1
                break
            else:
                self.cursorVisibleColIndex -= 1
                self.leftVisibleColIndex -= 1
                self.calcColLayout()
Redraw page one screen to the left. Note: keep the column cursor in the same general relative position: - if it is on the furthest right column, then it should stay on the furthest right column if possible - likewise on the left or in the middle So really both the `leftIndex` and the `cursorIndex` should move in tandem until things are correct.
def new(name):
    """Make a new interpolator from a libvips class nickname.

    For example::

        inter = pyvips.Interpolator.new('bicubic')

    Raises Error if no interpolator has that nickname.
    """
    pointer = vips_lib.vips_interpolate_new(_to_bytes(name))
    if pointer == ffi.NULL:
        raise Error('no such interpolator {0}'.format(name))
    return Interpolate(pointer)
Make a new interpolator by name. Make a new interpolator from the libvips class nickname. For example:: inter = pyvips.Interpolator.new('bicubic') You can get a list of all supported interpolators from the command-line with:: $ vips -l interpolate See for example :meth:`.affine`.
def milestone(self, column=None, value=None, **kwargs):
    """Status codes and related dates of certain grants.

    >>> GICS().milestone('milestone_date', '16-MAR-01')
    """
    return self._resolve_call('GIC_MILESTONE', column, value, **kwargs)
Status codes and related dates of certain grants, >>> GICS().milestone('milestone_date', '16-MAR-01')
def hexists(self, hashkey, attribute):
    """Emulate hexists: True if ``attribute`` exists in the hash."""
    stored = self._get_hash(hashkey, 'HEXISTS')
    return self._encode(attribute) in stored
Emulate hexists.
def to_json(self):
    """Convert the analysis period to a dictionary."""
    keys = ('st_month', 'st_day', 'st_hour',
            'end_month', 'end_day', 'end_hour',
            'timestep', 'is_leap_year')
    return {key: getattr(self, key) for key in keys}
Convert the analysis period to a dictionary.
def _def_check(self):
    """Check that the definition provided contains only valid arguments
    for a text index, each of the expected type.

    Raises:
        CloudantArgumentError: code 127 for an unrecognised argument
            name, code 128 for an argument value of the wrong type.
    """
    if self._def != dict():
        for key, val in iteritems_(self._def):
            if key not in list(TEXT_INDEX_ARGS.keys()):
                raise CloudantArgumentError(127, key)
            if not isinstance(val, TEXT_INDEX_ARGS[key]):
                raise CloudantArgumentError(128, key, TEXT_INDEX_ARGS[key])
Checks that the definition provided contains only valid arguments for a text index.
def read_csv(filename):
    """Pull locations from a user's CSV file (gpsbabel CSV output).

    Args:
        filename (str): CSV file to parse
    Returns:
        tuple of dict and list: locations keyed by generated name, and
        the generated names in file order
    """
    data = utils.prepare_csv_read(filename,
                                  ('latitude', 'longitude', 'name'),
                                  skipinitialspace=True)
    locations = {}
    names = []
    # Key each location by "NN:name" so duplicates stay distinct.
    for index, row in enumerate(data, 1):
        key = '%02i:%s' % (index, row['name'])
        locations[key] = (row['latitude'], row['longitude'])
        names.append(key)
    return locations, names
Pull locations from a user's CSV file.

Read gpsbabel_'s CSV output format

.. _gpsbabel: http://www.gpsbabel.org/

Args:
    filename (str): CSV file to parse

Returns:
    tuple of dict and list: locations keyed by generated ``NN:name``
    strings, and the list of those generated names in file order
def _remove_tree(self, tree, parent=None):
    """Really remove `tree` from all indexes in the database.

    Args:
        tree (obj): :class:`.Tree` instance.
        parent (obj, default None): reference to the parent tree.
    """
    # Depth-first: remove all sub-trees before the node itself.
    for sub_tree in tree.sub_trees:
        self._remove_tree(sub_tree, parent=tree)
    # Remove the node from every index database it has a value for.
    for index in tree.indexes:
        if not getattr(tree, index):
            continue
        self._remove_from(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )
    if parent:
        self._remove_from(self.parent_db, tree.path, parent)
    # Compact the ZEO storage after the removals.
    self.zeo.pack()
Really remove the tree identified by `tree` instance from all indexes from database. Args: tree (obj): :class:`.Tree` instance. parent (obj, default None): Reference to parent.
def Definition(self):
    """Return a string with the definition of this enum field."""
    result = self._FormatDescriptionComment()
    result += " enum %s {\n" % self.enum_name
    # Emit entries sorted by numeric value, as "NAME = value;".
    for k, v in sorted(iteritems(self.reverse_enum)):
        result += " %s = %s;\n" % (v, k)
    result += " }\n"
    result += self._FormatField()
    return result
Return a string with the definition of this field.
def execute_request(self, url, http_method, query_params, post_data):
    """Make a request to the given endpoint and post-process the response.

    Args:
        url (str): the API url without query params.
        http_method (str): the HTTP method to use.
        query_params (dict): query params to add to the request.
        post_data: JSON post data sent in the request body.

    Returns:
        ``response.json()`` when the output generator is the string
        "json", the generator's ``process_response`` result when one is
        configured, otherwise the raw ``requests.Response``.
    """
    response = requests.request(
        http_method, url,
        params=query_params,
        auth=self._auth,
        json=post_data,
        headers={'User-Agent': USER_AGENT})
    generator = self._output_generator
    if isinstance(generator, str) and generator.lower() == "json":
        return response.json()
    if generator is not None:
        return generator.process_response(response)
    return response
Makes a request to the specified url endpoint with the specified http method, params and post data. Args: url (string): The url to the API without query params. Example: "https://api.housecanary.com/v2/property/value" http_method (string): The http method to use for the request. query_params (dict): Dictionary of query params to add to the request. post_data: Json post data to send in the body of the request. Returns: The result of calling this instance's OutputGenerator process_response method on the requests.Response object. If no OutputGenerator is specified for this instance, returns the requests.Response.
def parse(self, data):
    """Parse bulk cymru data into (ip, asn, description) tuples."""
    records = []
    # Skip the header line; keep only pipe-delimited records.
    for raw_line in data.splitlines()[1:]:
        line = plain_str(raw_line)
        if "|" not in line:
            continue
        asn, ip, desc = (field.strip() for field in line.split('|'))
        if asn == "NA":
            continue
        records.append((ip, "AS%s" % asn, desc))
    return records
Parse bulk cymru data
def list_commands(self, ctx):
    """Return sorted command names from the .py files in ``self.folder``.

    Modules with a leading underscore are skipped; the ``.py`` extension
    is stripped from each name.
    """
    filenames = next(os.walk(self.folder))[2]
    commands = [
        name[:-3] for name in filenames
        if name.endswith(".py") and not name.startswith("_")
    ]
    commands.sort()
    return commands
List commands from folder.
def create_event_subscription(self, url):
    """Register a callback URL as an event subscriber.

    :param str url: callback URL
    :returns: the created event subscription
    :rtype: dict
    """
    response = self._do_request(
        'POST', '/v2/eventSubscriptions', {'callbackUrl': url})
    return response.json()
Register a callback URL as an event subscriber. :param str url: callback URL :returns: the created event subscription :rtype: dict
def assign_vertex_attrib_location(self, vbo, location):
    """Bind ``vbo``'s data to the given vertex attribute ``location``.

    Also records (or validates) ``self.n_verts`` against the buffer's
    first dimension.
    """
    with vbo:
        if self.n_verts:
            # All attribute buffers must agree on the vertex count.
            assert vbo.data.shape[0] == self.n_verts
        else:
            self.n_verts = vbo.data.shape[0]
        # Interpret the buffer as tightly-packed float rows of
        # shape[1] components each (stride 0, offset 0).
        gl.glVertexAttribPointer(location, vbo.data.shape[1], gl.GL_FLOAT, gl.GL_FALSE, 0, 0)
        gl.glEnableVertexAttribArray(location)
Load data into a vbo
def team_robots(self, team):
    """Get data about a team's robots.

    :param team: key for the team whose robots you want data on.
    :return: list of Robot objects.
    """
    raw_robots = self._get('team/%s/robots' % self.team_key(team))
    return [Robot(raw) for raw in raw_robots]
Get data about a team's robots. :param team: Key for team whose robots you want data on. :return: List of Robot objects
def send(self, msg, timeout=None):
    """Send a message to NI-CAN.

    :param can.Message msg: message to send
    :param timeout: unused; kept for interface compatibility

    :raises can.interfaces.nican.NicanError:
        if writing to the transmit buffer fails. Does not wait for the
        message to be ACKed.
    """
    arb_id = msg.arbitration_id
    if msg.is_extended_id:
        # Flag the arbitration ID as extended (29-bit) for NI-CAN.
        arb_id |= NC_FL_CAN_ARBID_XTD
    raw_msg = TxMessageStruct(
        arb_id,
        bool(msg.is_remote_frame),
        msg.dlc,
        CanData(*msg.data))
    nican.ncWrite(
        self.handle, ctypes.sizeof(raw_msg), ctypes.byref(raw_msg))
Send a message to NI-CAN. :param can.Message msg: Message to send :raises can.interfaces.nican.NicanError: If writing to transmit buffer fails. It does not wait for message to be ACKed currently.
def ensure_specification_cols_are_in_dataframe(specification, dataframe):
    """Check that every key in `specification` is a column of `dataframe`.

    Parameters
    ----------
    specification : OrderedDict.
        Keys should be a proper subset of the columns in `dataframe`.
    dataframe : pandas DataFrame.
        Dataframe containing the data for the choice model to be
        estimated.

    Returns
    -------
    None.

    Raises
    ------
    TypeError
        If `specification` is not an OrderedDict or `dataframe` is not
        a pandas DataFrame.
    ValueError
        If any specification key is missing from `dataframe`.
    """
    # Validate argument types with explicit raises rather than `assert`
    # (asserts are stripped when running under `python -O`).
    if not isinstance(specification, OrderedDict):
        raise TypeError("`specification` must be an OrderedDict.")
    if not isinstance(dataframe, pd.DataFrame):
        raise TypeError("`dataframe` must be a pandas DataFrame.")
    # Hoist the column lookup out of the loop.
    dataframe_cols = dataframe.columns
    problem_cols = [key for key in specification
                    if key not in dataframe_cols]
    if problem_cols != []:
        msg = "The following keys in the specification are not in 'data':\n{}"
        raise ValueError(msg.format(problem_cols))
    return None
Checks whether each column in `specification` is in `dataframe`. Raises ValueError if any of the columns are not in the dataframe. Parameters ---------- specification : OrderedDict. Keys are a proper subset of the columns in `data`. Values are either a list or a single string, "all_diff" or "all_same". If a list, the elements should be: - single objects that are in the alternative ID column of `data` - lists of objects that are within the alternative ID column of `data`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification` values, a single column will be created for all the alternatives within the iterable (i.e. there will be one common coefficient for the variables in the iterable). dataframe : pandas DataFrame. Dataframe containing the data for the choice model to be estimated. Returns ------- None.
def weight_statistics(self):
    """Extract a statistical summary of edge weights in the graph.

    :return: dict with the 'all_weights' list plus the 'min', 'max',
        'mean' and 'variance' of those weights.
    """
    weights = [attrs.get('weight', None)
               for _, _, attrs in self.graph.edges(data=True)]
    summary = describe(weights, nan_policy='omit')
    minimum, maximum = summary.minmax
    return {
        'all_weights': weights,
        'min': minimum,
        'max': maximum,
        'mean': summary.mean,
        'variance': summary.variance,
    }
Extract a statistical summary of edge weights present in the graph.

:return: A dict with an 'all_weights' list plus the 'min', 'max',
    'mean' and 'variance' of those weights
def add_f90_to_env(env):
    """Add Builders and construction variables for f90 to an Environment."""
    # Fall back to defaults when the environment doesn't define the
    # suffix lists.
    try:
        suffixes = env['F90FILESUFFIXES']
    except KeyError:
        suffixes = ['.f90']
    try:
        pp_suffixes = env['F90PPFILESUFFIXES']
    except KeyError:
        pp_suffixes = []
    DialectAddToEnv(env, "F90", suffixes, pp_suffixes, support_module=1)
Add Builders and construction variables for f90 to an Environment.
def loads(schema_str):
    """Parse a schema given a schema string.

    :raises ClientError: if the schema string cannot be parsed.
    """
    # The avro schema API renamed parse() to Parse() in Python 3.
    parser = schema.parse if sys.version_info[0] < 3 else schema.Parse
    try:
        return parser(schema_str)
    except schema.SchemaParseException as e:
        raise ClientError("Schema parse failed: %s" % (str(e)))
Parse a schema given a schema string
def bootstraps(self, _args):
    """List all the bootstraps available to build with."""
    for bs in Bootstrap.list_bootstraps():
        bs = Bootstrap.get_bootstrap(bs, self.ctx)
        # Print the bootstrap name in bold blue, then its recipe
        # dependencies in green.
        print('{Fore.BLUE}{Style.BRIGHT}{bs.name}{Style.RESET_ALL}'
              .format(bs=bs, Fore=Out_Fore, Style=Out_Style))
        print(' {Fore.GREEN}depends: {bs.recipe_depends}{Fore.RESET}'
              .format(bs=bs, Fore=Out_Fore))
List all the bootstraps available to build with.
def create_textview(self, wrap_mode=Gtk.WrapMode.WORD_CHAR, justify=Gtk.Justification.LEFT, visible=True, editable=True):
    """Create a Gtk.TextView with the given wrap mode and justification.

    The cursor is always hidden for non-editable views; otherwise its
    visibility follows ``visible``.
    """
    view = Gtk.TextView()
    view.set_wrap_mode(wrap_mode)
    view.set_editable(editable)
    view.set_cursor_visible(visible if editable else False)
    view.set_justification(justify)
    return view
Function creates a text view with wrap_mode and justification
def disable(self):
    """Disable the entity at this endpoint, restarting the service if
    it reports that a restart is required.
    """
    self.post("disable")
    if self.service.restart_required:
        # Allow up to two minutes for the service to come back.
        self.service.restart(120)
    return self
Disables the entity at this endpoint.
def PrintFeed(feed):
    """Example function from Google to print a spreadsheet feed.

    NOTE: Python 2 only (uses print statements).
    """
    import gdata
    for i, entry in enumerate(feed.entry):
        if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed):
            # Cells feed: title is the cell reference, content its value.
            print '%s %s\n' % (entry.title.text, entry.content.text)
        elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed):
            print '%s %s %s' % (i, entry.title.text, entry.content.text)
            print 'Contents:'
            # List feed rows expose their fields through entry.custom.
            for key in entry.custom:
                print ' %s: %s' % (key, entry.custom[key].text)
            print '\n',
        else:
            print '%s %s\n' % (i, entry.title.text)
Example function from Google to print a feed