code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_output(self, style=OutputStyle.file):
    """Return the result of all previous calls to execute_code.

    :param style: output style to use (defaults to ``OutputStyle.file``).
    """
    # All accumulated output lives on the manager; delegate directly.
    return self.manager.get_output(style=style)
Returns the result of all previous calls to execute_code.
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """Return the nagios unit name, prepended with host_context if needed.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if host_context:
        # Qualify the unit with its host context when one is configured.
        return "%s:%s" % (host_context, local_unit())
    return local_unit()
Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to
def _get_bq_service(credentials=None, service_url=None):
    """Construct an authorized BigQuery service object.

    :param credentials: ServiceAccountCredentials used to authorize HTTP.
    :param service_url: optional discovery service URL.
    """
    assert credentials, 'Must provide ServiceAccountCredentials'
    authorized_http = credentials.authorize(Http())
    return build(
        'bigquery',
        'v2',
        http=authorized_http,
        discoveryServiceUrl=service_url,
        cache_discovery=False,
    )
Construct an authorized BigQuery service object.
def filter_rows(self, filters, rows):
    """Return the subset of *rows* that survive the given *filters*.

    A row is kept only when ``row_is_filtered`` says it is NOT filtered.
    """
    return [row for row in rows if not self.row_is_filtered(row, filters)]
returns rows as filtered by filters
def register_template_directory(kb_app: kb,
                                sphinx_app: Sphinx,
                                sphinx_env: BuildEnvironment,
                                docnames: List[str] = None,
                                ):
    """Add this resource's templates dir to template paths.

    :param kb_app: the knowledge-base application instance.
    :param sphinx_app: the running Sphinx application.
    :param sphinx_env: the Sphinx build environment (unused here).
    :param docnames: document names (unused here).
    """
    # NOTE: the original signature read ``docnames=List[str]``, i.e. the
    # typing object itself was the *default value* — an obvious typo for an
    # annotation. The parameter is unused, so defaulting to None is safe.
    template_bridge = sphinx_app.builder.templates
    for action in ResourceAction.get_callbacks(kb_app):
        # Register the directory containing the callback's source file.
        action_dir = os.path.dirname(inspect.getfile(action))
        template_bridge.loaders.append(SphinxFileSystemLoader(action_dir))
Add this resource's templates dir to template paths
def _load_fofn(cls, fofn):
    """Return a dict of filename -> short name read from a file of filenames.

    The value is None whenever a short name is not provided on the line.
    """
    mapping = {}
    handle = pyfastaq.utils.open_file_read(fofn)
    for line in handle:
        fields = line.rstrip().split()
        # Each line must hold a filename, optionally followed by a short name.
        if len(fields) not in (1, 2):
            raise Error('Error at the following line of file ' + fofn + '. Expected at most 2 fields.\n' + line)
        mapping[fields[0]] = fields[1] if len(fields) == 2 else None
    pyfastaq.utils.close(handle)
    return mapping
Returns dictionary of filename -> short name. Value is None whenever short name is not provided
def set_gamma_value(self, value):
    """Setter for the gamma value.

    :param value: gamma value to store; must be a ``float``.
    :raises TypeError: if *value* is not a float.
    """
    # Idiomatic type check (was ``isinstance(...) is False``).
    if not isinstance(value, float):
        raise TypeError("The type of __gamma_value must be float.")
    self.__gamma_value = value
Setter for the gamma value.
def _update_parsed_node_info(self, parsed_node, config):
    """Given the SourceConfig used for parsing and the parsed node,
    generate and set the true values to use, overriding the temporary
    parse values set in _build_intermediate_parsed_node.
    """
    # Resolve the schema, honoring any per-model 'schema' config override.
    schema_override = config.config.get('schema')
    get_schema = self.get_schema_func()
    parsed_node.schema = get_schema(schema_override).strip()
    # Resolve the alias the same way; the alias resolver also sees the node.
    alias_override = config.config.get('alias')
    get_alias = self.get_alias_func()
    parsed_node.alias = get_alias(parsed_node, alias_override).strip()
    # Database falls back to the parser's default when not configured.
    parsed_node.database = config.config.get(
        'database', self.default_database
    ).strip()
    # Tags from the config are appended to whatever tags the node carries.
    model_tags = config.config.get('tags', [])
    parsed_node.tags.extend(model_tags)
    # Merge the parsed config into the node's existing config dict.
    config_dict = parsed_node.get('config', {})
    config_dict.update(config.config)
    parsed_node.config = config_dict
    # Normalize pre/post hooks into their canonical form.
    for hook_type in dbt.hooks.ModelHookType.Both:
        parsed_node.config[hook_type] = dbt.hooks.get_hooks(parsed_node, hook_type)
Given the SourceConfig used for parsing and the parsed node, generate and set the true values to use, overriding the temporary parse values set in _build_intermediate_parsed_node.
def dims_intersect(self):
    """Dimensions of the arrays in this list that are used in all arrays."""
    # Nested lists fall back to their own dims_intersect; leaves use .dims.
    dim_sets = (set(getattr(arr, 'dims_intersect', arr.dims)) for arr in self)
    return set.intersection(*dim_sets)
Dimensions of the arrays in this list that are used in all arrays
def cd(self, dir_p):
    """Change the current directory level.

    in dir_p of type str: the name of the directory to go in.
    return progress of type :class:`IProgress`: tracks operation completion.
    """
    if not isinstance(dir_p, basestring):
        raise TypeError("dir_p can only be an instance of type basestring")
    # The hypervisor call returns a raw progress handle; wrap it.
    raw_progress = self._call("cd", in_p=[dir_p])
    return IProgress(raw_progress)
Change the current directory level. in dir_p of type str The name of the directory to go in. return progress of type :class:`IProgress` Progress object to track the operation completion.
def get_moods(self):
    """Return moods defined on the gateway.

    Returns a Command.
    """
    mood_parent = self._get_mood_parent()

    def process_result(result):
        # Resolve every raw mood entry under the shared parent.
        return [self.get_mood(mood, mood_parent=mood_parent)
                for mood in result]

    return Command('get', [ROOT_MOODS, mood_parent],
                   process_result=process_result)
Return moods defined on the gateway. Returns a Command.
def _stringify_predicate_value(value):
    """Convert Python objects to Space-Track compatible strings.

    - Booleans (``True`` -> ``'true'``)
    - Sequences (``[25544, 34602]`` -> ``'25544,34602'``)
    - dates/datetimes (``date(2015, 12, 23)`` -> ``'2015-12-23'``)
    - ``None`` -> ``'null-val'``
    """
    # bool must be handled before the generic str() fallback.
    if isinstance(value, bool):
        return str(value).lower()
    # Strings are sequences too, so exclude them from the join branch.
    if isinstance(value, Sequence) and not isinstance(value, six.string_types):
        return ','.join(_stringify_predicate_value(x) for x in value)
    # datetime is a subclass of date; check it first to keep the time part.
    if isinstance(value, datetime.datetime):
        return value.isoformat(sep=' ')
    if isinstance(value, datetime.date):
        return value.isoformat()
    if value is None:
        return 'null-val'
    return str(value)
Convert Python objects to Space-Track compatible strings - Booleans (``True`` -> ``'true'``) - Sequences (``[25544, 34602]`` -> ``'25544,34602'``) - dates/datetimes (``date(2015, 12, 23)`` -> ``'2015-12-23'``) - ``None`` -> ``'null-val'``
def remove_all_network_profiles(self, obj):
    """Remove all the AP profiles."""
    for profile_name in self.network_profile_name_list(obj):
        self._logger.debug("delete profile: %s", profile_name)
        name_buf = create_unicode_buffer(profile_name)
        result = self._wlan_delete_profile(self._handle, obj['guid'], name_buf)
        self._logger.debug("delete result %d", result)
Remove all the AP profiles.
def assert_matching_time_coord(arr1, arr2):
    """Check that two DataArrays share an identical time coordinate.

    :param arr1: first DataArray or Dataset
    :param arr2: second DataArray or Dataset
    :raises ValueError: if the time coordinates are not identical.
    """
    if arr1[TIME_STR].identical(arr2[TIME_STR]):
        return
    message = ('Time weights not indexed by the same time coordinate as'
               ' computed data. This will lead to an improperly computed'
               ' time weighted average. Exiting.\n'
               'arr1: {}\narr2: {}')
    raise ValueError(message.format(arr1[TIME_STR], arr2[TIME_STR]))
Check to see if two DataArrays have the same time coordinate. Parameters ---------- arr1 : DataArray or Dataset First DataArray or Dataset arr2 : DataArray or Dataset Second DataArray or Dataset Raises ------ ValueError If the time coordinates are not identical between the two Datasets
def _uninstall(cls): if cls._hook: sys.meta_path.remove(cls._hook) cls._hook = None
uninstall the hook if installed
def _resolve_formatter(self, attr):
    """Resolve a sugary or plain capability name, color, or compound
    formatting function name into a callable capability.

    Return a ``ParametrizingString`` or a ``FormattingString``.
    """
    if attr in COLORS:
        return self._resolve_color(attr)
    if attr in COMPOUNDABLES:
        return self._formatting_string(self._resolve_capability(attr))
    parts = split_into_formatters(attr)
    if all(part in COMPOUNDABLES for part in parts):
        # Compound names like 'bold_underline' concatenate their pieces.
        joined = u''.join(self._resolve_formatter(part) for part in parts)
        return self._formatting_string(joined)
    return ParametrizingString(self._resolve_capability(attr))
Resolve a sugary or plain capability name, color, or compound formatting function name into a callable capability. Return a ``ParametrizingString`` or a ``FormattingString``.
def template_to_base_path(template, google_songs):
    """Get base output path for a list of songs for download."""
    # The cwd and the '%suggested%' sentinel both mean "download here".
    if template in (os.getcwd(), '%suggested%'):
        return os.getcwd()
    abs_template = os.path.abspath(template)
    song_paths = [template_to_filepath(abs_template, song)
                  for song in google_songs]
    return os.path.dirname(os.path.commonprefix(song_paths))
Get base output path for a list of songs for download.
def _validate_jpx_box_sequence(self, boxes):
    """Run through series of tests for JPX box legality."""
    self._validate_label(boxes)
    # boxes[1] is expected to carry the compatibility list used here.
    self._validate_jpx_compatibility(boxes, boxes[1].compatibility_list)
    self._validate_singletons(boxes)
    self._validate_top_level(boxes)
Run through series of tests for JPX box legality.
def rename(self, new_name_or_name_dict=None, **names):
    """Returns a new DataArray with renamed coordinates or a new name.

    If the positional argument is dict-like (or keyword names are given),
    it is used as a mapping from old to new coordinate names; otherwise it
    becomes the new name of this array.

    See Also
    --------
    Dataset.rename
    DataArray.swap_dims
    """
    if not (names or utils.is_dict_like(new_name_or_name_dict)):
        # Plain string (or None): just replace the array's own name.
        return self._replace(name=new_name_or_name_dict)
    name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, 'rename')
    dataset = self._to_temp_dataset().rename(name_dict)
    return self._from_temp_dataset(dataset)
Returns a new DataArray with renamed coordinates or a new name. Parameters ---------- new_name_or_name_dict : str or dict-like, optional If the argument is dict-like, it is used as a mapping from old names to new names for coordinates. Otherwise, use the argument as the new name for this array. **names, optional The keyword arguments form of a mapping from old names to new names for coordinates. One of new_name_or_name_dict or names must be provided. Returns ------- renamed : DataArray Renamed array or array with renamed coordinates. See Also -------- Dataset.rename DataArray.swap_dims
def get_zipped_dataset_from_predictions(predictions):
    """Creates dataset from in-memory predictions.

    Returns (iterator, feed_dict, num_videos): an initializable iterator
    over (targets, outputs) slices, the feed dict for its placeholders,
    and the number of videos.
    """
    targets = stack_data_given_key(predictions, "targets")
    outputs = stack_data_given_key(predictions, "outputs")
    num_videos, num_steps = targets.shape[:2]
    # Clip outputs to the same number of steps as the targets.
    outputs = outputs[:, :num_steps]
    # Placeholders keep the (possibly large) arrays out of the graph proto.
    targets_placeholder = tf.placeholder(targets.dtype, targets.shape)
    outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape)
    dataset = tf.data.Dataset.from_tensor_slices(
        (targets_placeholder, outputs_placeholder))
    iterator = dataset.make_initializable_iterator()
    feed_dict = {targets_placeholder: targets,
                 outputs_placeholder: outputs}
    return iterator, feed_dict, num_videos
Creates dataset from in-memory predictions.
def set_user_passwd(self, userid, data):
    """Set user password via the users/password API endpoint."""
    endpoint = ENDPOINTS['users']['password']
    return self.api_call(endpoint, dict(userid=userid), body=data)
Set user password
def path(path_name=None, override=None, *, root=None, name=None, ext=None,
         inject=None, relpath=None, reduce=False):
    """Path manipulation black magic.

    Builds a normalized path from *path_name*, applying name/extension
    overrides, root/inject directory handling, and optional relative-path
    formatting.
    """
    path_name, identity, root = _initialize(path_name, override, root, inject)
    new_name = _process_name(path_name, identity, name, ext)
    new_directory = _process_directory(path_name, identity, root, inject)
    full_path = os.path.normpath(os.path.join(new_directory, new_name))
    # Directories (empty new_name) may need a trailing separator appended.
    if APPEND_SEP_TO_DIRS and not new_name and full_path[-1] != os.sep:
        full_path += os.sep
    return _format_path(full_path, root, relpath, reduce)
Path manipulation black magic
def _margtimephase_loglr(self, mf_snr, opt_snr): return special.logsumexp(numpy.log(special.i0(mf_snr)), b=self._deltat) - 0.5*opt_snr
Returns the log likelihood ratio marginalized over time and phase.
def get_currentDim(self):
    """Returns the current dimensions of the object.

    :returns: tuple ``(curDim, nself)`` where ``curDim`` is a pair
        ``[dimension names, dimension sizes]`` and ``nself`` is the
        number of dimensions.
    """
    selfDim = self._dimensions.copy()
    if not isinstance(selfDim, dimStr):
        # Plain dict: '_ndims' (if present) is metadata, not a dimension.
        # BUG FIX: dict.has_key() was removed in Python 3; use ``in``.
        if '_ndims' in selfDim:
            nself = selfDim.pop('_ndims')
        else:
            self.warning(1, 'self._dimensions does not have the _ndims key')
            nself = len(selfDim)
    else:
        nself = selfDim['_ndims']
    curDim = [[key for key in selfDim.keys()],
              [selfDim[key] for key in selfDim.keys()]]
    return curDim, nself
returns the current dimensions of the object
def _prepare_menu(self, node, flat=None):
    """Prepare the menu hierarchy from the given device tree.

    :param Device node: root node of device hierarchy
    :returns: menu hierarchy as list
    """
    if flat is None:
        flat = self.flat
    # Flat menus use plain sections; nested menus use submenus.
    group_cls = MenuSection if flat else SubMenu
    menu = []
    for branch in node.branches:
        # Skip branches with neither methods nor children.
        if branch.methods or branch.branches:
            menu.append(group_cls(branch.label,
                                  self._collapse_device(branch, flat)))
    return menu
Prepare the menu hierarchy from the given device tree. :param Device node: root node of device hierarchy :returns: menu hierarchy as list
def get(self):
    """Get a connection from the pool, to make and receive traffic.

    If the connection fails for any reason (socket.error), it is dropped
    and a new one is scheduled. Please use @retry as a way to
    automatically retry whatever operation you were performing.
    """
    self.lock.acquire()
    try:
        c = self.conn.popleft()
        yield c
    except self.exc_classes:
        # Connection-level failure: drop it and schedule a replacement.
        # NOTE(review): the lock is not released on this path — presumably
        # the replacement logic in _addOne releases it; verify.
        gevent.spawn_later(1, self._addOne)
        raise
    except:
        # Any other error: the connection itself is fine, so return it
        # to the pool before releasing the lock and re-raising.
        self.conn.append(c)
        self.lock.release()
        raise
    else:
        self.conn.append(c)
        self.lock.release()
Get a connection from the pool, to make and receive traffic. If the connection fails for any reason (socket.error), it is dropped and a new one is scheduled. Please use @retry as a way to automatically retry whatever operation you were performing.
def plot(self):
    """Plot the magnitude depth."""
    import warnings
    msg = "'%s.plot': ADW 2018-05-05" % self.__class__.__name__
    # BUG FIX: the original only *instantiated* DeprecationWarning(msg)
    # without emitting it; actually issue the warning.
    warnings.warn(msg, DeprecationWarning)
    import ugali.utils.plotting
    # Build a full-sky map, filling only the ROI pixels with depth values.
    mask = hp.UNSEEN * np.ones(hp.nside2npix(self.nside))
    mask[self.roi.pixels] = self.mask_roi_sparse
    mask[mask == 0.] = hp.UNSEEN
    ugali.utils.plotting.zoomedHealpixMap('Completeness Depth', mask,
                                          self.roi.lon, self.roi.lat,
                                          self.roi.config.params['coords']['roi_radius'])
Plot the magnitude depth.
def get_requirements(self, arguments, max_retries=None, use_wheels=False):
    """Use pip to download and unpack the requested source distribution archives.

    :param arguments: The command line arguments to ``pip install ...``
                      (a list of strings).
    :param max_retries: The maximum number of times that pip will be asked
                        to download distribution archives; :data:`None`
                        means use the configured default.
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels
                       (:data:`False` by default for backwards
                       compatibility).

    .. warning:: Requirements which are already installed are not included
       in the result.
    """
    arguments = self.decorate_arguments(arguments)
    # Filter pip's download noise and patch setup_requires handling for
    # the duration of the operation.
    with DownloadLogFilter():
        with SetupRequiresPatch(self.config, self.eggs_links):
            self.create_build_directory()
            if any(match_option(a, '-U', '--upgrade') for a in arguments):
                logger.info("Checking index(es) for new version (-U or --upgrade was given) ..")
            else:
                # Without --upgrade we may be able to satisfy everything
                # from archives we already have.
                try:
                    return self.unpack_source_dists(arguments, use_wheels=use_wheels)
                except DistributionNotFound:
                    logger.info("We don't have all distribution archives yet!")
            if max_retries is None:
                max_retries = self.config.max_retries
            # Retry downloads to ride out intermittent network failures.
            for i in range(max_retries):
                try:
                    return self.download_source_dists(arguments, use_wheels=use_wheels)
                except Exception as e:
                    if i + 1 < max_retries:
                        logger.warning("pip raised exception while downloading distributions: %s", e)
                    else:
                        # Out of retries: propagate the last failure.
                        raise
                logger.info("Retrying after pip failed (%i/%i) ..", i + 1, max_retries)
Use pip to download and unpack the requested source distribution archives. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param max_retries: The maximum number of times that pip will be asked to download distribution archives (this helps to deal with intermittent failures). If this is :data:`None` then :attr:`~.Config.max_retries` is used. :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). .. warning:: Requirements which are already installed are not included in the result. If this breaks your use case consider using pip's ``--ignore-installed`` option.
def buildcommit(self):
    """Get build commit id.

    :return: build commit id or None if not found
    """
    if len(self.dutinformation) > 0:
        build = self.dutinformation.get(0).build
        if build is not None:
            return build.commit_id
    return None
get build commit id. :return: build commit id or None if not found
def action_update(self):
    """Form action endpoint to update the attachments."""
    order = []
    form = self.request.form
    for record in form.get("attachments", []):
        values = dict(record)
        uid = values.pop("UID")
        obj = api.get_object_by_uid(uid)
        # A checked "delete" box removes the attachment entirely.
        if values.pop("delete", False):
            self.delete_attachment(obj)
            continue
        order.append(uid)
        obj.update(**values)
        obj.reindexObject()
    self.set_attachments_order(order)
    return self.request.response.redirect(self.context.absolute_url())
Form action endpoint to update the attachments
def close(self):
    """Send CLOSE command to device."""
    command = StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00)
    self._send_method(command, self._close_message_received)
Send CLOSE command to device.
def _try_join_cancelled_thread(thread): thread.join(10) if thread.is_alive(): logging.warning("Thread %s did not terminate within grace period after cancellation", thread.name)
Join a thread, but if the thread doesn't terminate for some time, ignore it instead of waiting infinitely.
def sni2route(self, sni: SchemaNodeId, sctx: SchemaContext) -> SchemaRoute:
    """Translate schema node identifier to a schema route.

    Args:
        sni: Schema node identifier (absolute or relative).
        sctx: Schema context.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If a prefix specified in `sni` is not declared.
    """
    parts = sni.split("/")
    # Absolute identifiers start with '/', yielding an empty first part.
    if sni[0] == "/":
        parts = parts[1:]
    return [self.translate_node_id(part, sctx) for part in parts]
Translate schema node identifier to a schema route. Args: sni: Schema node identifier (absolute or relative). sctx: Schema context. Raises: ModuleNotRegistered: If `mid` is not registered in the data model. UnknownPrefix: If a prefix specified in `sni` is not declared.
def dispatch_to_index_op(op, left, right, index_class):
    """Wrap Series *left* in *index_class* to delegate the operation *op*
    to the index implementation (type checking, timezone handling,
    overflow checks, etc.).

    Parameters
    ----------
    op : binary operator (operator.add, operator.sub, ...)
    left : Series
    right : object
    index_class : DatetimeIndex or TimedeltaIndex

    Returns
    -------
    result : object, usually DatetimeIndex, TimedeltaIndex, or Series
    """
    left_idx = index_class(left)
    # Operate on a freq-less copy of the index when a freq is set.
    if getattr(left_idx, 'freq', None) is not None:
        left_idx = left_idx._shallow_copy(freq=None)
    try:
        return op(left_idx, right)
    except NullFrequencyError:
        raise TypeError('incompatible type for a datetime/timedelta '
                        'operation [{name}]'.format(name=op.__name__))
Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series right : object index_class : DatetimeIndex or TimedeltaIndex Returns ------- result : object, usually DatetimeIndex, TimedeltaIndex, or Series
def add_ref(self, reftype, data):
    """Add a reference and return its identifier.

    Identical references are de-duplicated: re-adding one returns the
    identifier of the existing entry.
    """
    ref = (reftype, data)
    if ref in self.refs:
        index = self.refs.index(ref)
    else:
        self.refs.append(ref)
        index = len(self.refs) - 1
    return str(index)
Add a reference and returns the identifier.
def _csv_temp(self, cursor, fieldnames): temp_fd, temp_path = tempfile.mkstemp(text=True) with open(temp_fd, 'w', encoding='utf-8', newline='') as results_fh: self._csv(cursor, fieldnames, results_fh) return temp_path
Writes the rows of `cursor` in CSV format to a temporary file and returns the path to that file. :param cursor: database cursor containing data to be output :type cursor: `sqlite3.Cursor` :param fieldnames: row headings :type fieldnames: `list` :rtype: `str`
def enable(states):
    """Enable state function or sls run.

    :param states: state name(s) to enable; same name(s) that would be
        passed in a state.sls call.
    :return: dict with 'res' (bool) and 'msg' (joined info lines).

    CLI Example:

    .. code-block:: bash

        salt '*' state.enable highstate
        salt '*' state.enable test.succeed_without_changes
    """
    ret = {'res': True, 'msg': ''}
    states = salt.utils.args.split_input(states)
    log.debug('states %s', states)
    msg = []
    # The disabled list lives in grains; tolerate a missing/odd value.
    _disabled = __salt__['grains.get']('state_runs_disabled')
    if not isinstance(_disabled, list):
        _disabled = []
    _changed = False
    for _state in states:
        log.debug('_state %s', _state)
        if _state not in _disabled:
            msg.append('Info: {0} state already enabled.'.format(_state))
        else:
            msg.append('Info: {0} state enabled.'.format(_state))
            _disabled.remove(_state)
            _changed = True
    # Persist the grain only when something actually changed.
    if _changed:
        __salt__['grains.setval']('state_runs_disabled', _disabled)
    ret['msg'] = '\n'.join(msg)
    # Refresh so the loader sees the re-enabled states immediately.
    __salt__['saltutil.refresh_modules']()
    return ret
Enable state function or sls run CLI Example: .. code-block:: bash salt '*' state.enable highstate salt '*' state.enable test.succeed_without_changes .. note:: To enable a state file that has been disabled, provide the same name that would be passed in a state.sls call. salt '*' state.enable bind.config
def validate(self, attrs):
    """Check whether the supplied login credentials are correct."""
    user = authenticate(**self.user_credentials(attrs))
    if not user:
        raise serializers.ValidationError(_("Invalid login credentials."))
    if not user.is_active:
        raise serializers.ValidationError(_("This account is currently inactive."))
    # Remember the authenticated user for later use by the serializer.
    self.instance = user
    return attrs
checks if login credentials are correct
def from_lines(lines: Iterable[str], **kwargs) -> BELGraph:
    """Load a BEL graph from an iterable over the lines of a BEL script.

    :param lines: An iterable of strings (the lines in a BEL script)

    The remaining keyword arguments are passed to
    :func:`pybel.io.line_utils.parse_lines`.
    """
    result = BELGraph()
    parse_lines(graph=result, lines=lines, **kwargs)
    return result
Load a BEL graph from an iterable over the lines of a BEL script. :param lines: An iterable of strings (the lines in a BEL script) The remaining keyword arguments are passed to :func:`pybel.io.line_utils.parse_lines`.
def migrateFileFields(portal):
    """Walk over all attachment types and migrate their FileField fields."""
    portal_types = [
        "Attachment",
        "ARImport",
        "Instrument",
        "InstrumentCertification",
        "Method",
        "Multifile",
        "Report",
        "ARReport",
        "SamplePoint",
    ]
    for portal_type in portal_types:
        migrate_to_blob(portal, portal_type=portal_type, remove_old_value=True)
This function walks over all attachment types and migrates their FileField fields.
def get_channelstate_filter(
        chain_state: ChainState,
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        filter_fn: Callable,
) -> List[NettingChannelState]:
    """Return the state of channels that match the condition in `filter_fn`."""
    token_network = get_token_network_by_token_address(
        chain_state,
        payment_network_id,
        token_address,
    )
    if not token_network:
        return []
    return [
        channel_state
        for channel_state in token_network.channelidentifiers_to_channels.values()
        if filter_fn(channel_state)
    ]
Return the state of channels that match the condition in `filter_fn`
def remove(self):
    """Remove the hook from the model (safe to call more than once)."""
    if self.removed:
        return
    self.hook.remove()
    self.removed = True
Remove the hook from the model.
def add(self, data, name=None):
    """Appends a new column of data to the data source.

    Args:
        data (seq) : new data to add
        name (str, optional) : column name to use. If not supplied,
            generate a name of the form "Series ####"

    Returns:
        str: the column name used
    """
    if name is None:
        # Find the first unused auto-generated series name.
        counter = len(self.data)
        while "Series %d" % counter in self.data:
            counter += 1
        name = "Series %d" % counter
    self.data[name] = data
    return name
Appends a new column of data to the data source. Args: data (seq) : new data to add name (str, optional) : column name to use. If not supplied, generate a name of the form "Series ####" Returns: str: the column name used
def sync_time(self):
    """Sets the time on the pyboard to match the time on the host.

    Returns the host's local time tuple that was sent.
    """
    now = time.localtime(time.time())
    # Remote expects (year, month, day, weekday 1-7, hour, min, sec, 0).
    self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday,
                           now.tm_wday + 1, now.tm_hour, now.tm_min,
                           now.tm_sec, 0))
    return now
Sets the time on the pyboard to match the time on the host.
def optimal_part_info(length, part_size):
    """Calculate optimal part size for multipart uploads.

    :param length: Input length to calculate part size of (-1 means
        unknown, treated as the maximum object size).
    :param part_size: Candidate part size; the minimum value triggers
        auto-sizing based on the maximum part count.
    :return: tuple of (total_parts_count, part_size, last_part_size).
    """
    if length == -1:
        # Unknown length: assume the maximum allowed object size.
        length = MAX_MULTIPART_OBJECT_SIZE
    if length > MAX_MULTIPART_OBJECT_SIZE:
        raise InvalidArgumentError('Input content size is bigger '
                                   ' than allowed maximum of 5TiB.')
    if part_size != MIN_PART_SIZE:
        part_size_float = float(part_size)
    else:
        # Spread the object across the maximum part count, rounded up
        # to a multiple of the requested part size.
        part_size_float = math.ceil(length / MAX_MULTIPART_COUNT)
        part_size_float = (math.ceil(part_size_float / part_size)
                           * part_size)
    total_parts_count = int(math.ceil(length / part_size_float))
    part_size = int(part_size_float)
    last_part_size = length - int(total_parts_count - 1) * part_size
    return total_parts_count, part_size, last_part_size
Calculate optimal part size for multipart uploads. :param length: Input length to calculate part size of. :param part_size: Candidate part size to use or adjust. :return: Tuple of (total parts count, part size, last part size).
def parse_image_spec(spec):
    """Parse a Publ-Markdown image spec into a tuple of (path, args, title)."""
    # Trailing quoted text is the title, e.g. 'foo.jpg "my title"'.
    title_match = re.match(r'(.+)\s+\"(.*)\"\s*$', spec)
    if title_match:
        spec, title = title_match.group(1, 2)
    else:
        title = None
    # A trailing {...} block holds the argument list.
    args_match = re.match(r'([^\{]*)(\{(.*)\})\s*$', spec)
    if args_match:
        spec = args_match.group(1)
        args = parse_arglist(args_match.group(3))
    else:
        args = {}
    return spec, args, (title and html.unescape(title))
Parses out a Publ-Markdown image spec into a tuple of path, args, title
def _get_extended(scene, resp):
    """Parse metadata returned from the metadataUrl of a USGS scene.

    :param scene: Dictionary representation of a USGS scene
    :param resp: Response object from requests/grequests
    """
    root = ElementTree.fromstring(resp.text)
    fields = root.findall("eemetadata:metadataFields/eemetadata:metadataField",
                          NAMESPACES)
    # Map each field's name to the xsi-typed value of its first child.
    scene['extended'] = {field.attrib.get('name').strip(): xsi.get(field[0])
                        for field in fields}
    return scene
Parse metadata returned from the metadataUrl of a USGS scene. :param scene: Dictionary representation of a USGS scene :param resp: Response object from requests/grequests
def list(self, filter_args=None):
    """List the faked resources of this manager.

    Parameters:
        filter_args (dict): Filter arguments. `None` causes no filtering
            to happen.

    Returns:
        list of FakedBaseResource: The faked resource objects of this
        manager that match the filters.
    """
    matches = []
    for oid in self._resources:
        candidate = self._resources[oid]
        if self._matches_filters(candidate, filter_args):
            matches.append(candidate)
    return matches
List the faked resources of this manager. Parameters: filter_args (dict): Filter arguments. `None` causes no filtering to happen. See :meth:`~zhmcclient.BaseManager.list()` for details. Returns: list of FakedBaseResource: The faked resource objects of this manager.
def get_all_tags_of_confirmation(self, confirmation_id):
    """Get all tags of confirmation.

    This iterates over all pages until it gets all elements. So if the
    rate limit is exceeded it will throw an Exception and you will get
    nothing.

    :param confirmation_id: the confirmation id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_confirmation_per_page,
        resource=CONFIRMATION_TAGS,
        confirmation_id=confirmation_id,
    )
Get all tags of confirmation This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param confirmation_id: the confirmation id :return: list
def map(**kwargs):
    """Return a dict of the given keyword arguments mapped to their values
    from the environment, with lookup keys lower-cased."""
    env = lower_dict(environ.copy())
    return {key: env.get(var.lower()) for key, var in kwargs.items()}
Returns a dictionary of the given keyword arguments mapped to their values from the environment, with input keys lower cased.
def get(self, sid):
    """Constructs a IpAccessControlListMappingContext.

    :param sid: A 34 character string that uniquely identifies the
        resource to fetch.
    :returns: IpAccessControlListMappingContext for the given sid.
    """
    return IpAccessControlListMappingContext(
        self._version,
        account_sid=self._solution['account_sid'],
        domain_sid=self._solution['domain_sid'],
        sid=sid,
    )
Constructs a IpAccessControlListMappingContext :param sid: A 34 character string that uniquely identifies the resource to fetch. :returns: twilio.rest.api.v2010.account.sip.domain.ip_access_control_list_mapping.IpAccessControlListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.ip_access_control_list_mapping.IpAccessControlListMappingContext
def set_system_id(self, system_id):
    """Sets the system ID.

    :param system_id: a system ID (also called board processor ID)
    """
    # Ask the hypervisor to update the ID before mutating local state.
    yield from self._hypervisor.send('{platform} set_system_id "{name}" {system_id}'.format(platform=self._platform, name=self._name, system_id=system_id))
    log.info('Router "{name}" [{id}]: system ID updated from {old_id} to {new_id}'.format(name=self._name, id=self._id, old_id=self._system_id, new_id=system_id))
    self._system_id = system_id
Sets the system ID. :param system_id: a system ID (also called board processor ID)
def tarbell_update(command, args):
    """Update the current tarbell project to the latest blueprint."""
    with ensure_settings(command, args) as settings, ensure_project(command, args) as site:
        puts("Updating to latest blueprint\n")
        git = sh.git.bake(_cwd=site.base.base_dir)
        # Stash local edits so the pull applies cleanly.
        puts(colored.yellow("Stashing local changes"))
        puts(git.stash())
        puts(colored.yellow("Pull latest changes"))
        puts(git.pull())
        # Restore the stashed local changes, if any were stashed.
        if git.stash.list():
            puts(git.stash.pop())
Update the current tarbell project.
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
    """Shift this array by an offset along one or more dimensions.

    Only the data is moved; coordinates stay in place. Values shifted
    from beyond array bounds are replaced by *fill_value*.

    See also
    --------
    roll
    """
    shifted = self.variable.shift(
        shifts=shifts, fill_value=fill_value, **shifts_kwargs)
    return self._replace(variable=shifted)
Shift this array by an offset along one or more dimensions. Only the data is moved; coordinates stay in place. Values shifted from beyond array bounds are replaced by NaN. This is consistent with the behavior of ``shift`` in pandas. Parameters ---------- shifts : Mapping with the form of {dim: offset} Integer offset to shift along each of the given dimensions. Positive offsets shift to the right; negative offsets shift to the left. fill_value: scalar, optional Value to use for newly missing values **shifts_kwargs: The keyword arguments form of ``shifts``. One of shifts or shifts_kwarg must be provided. Returns ------- shifted : DataArray DataArray with the same coordinates and attributes but shifted data. See also -------- roll Examples -------- >>> arr = xr.DataArray([5, 6, 7], dims='x') >>> arr.shift(x=1) <xarray.DataArray (x: 3)> array([ nan, 5., 6.]) Coordinates: * x (x) int64 0 1 2
def parse(self, rrstr):
    """Parse a Rock Ridge Time Stamp record out of a string.

    Parameters:
        rrstr - The string to parse the record out of.
    Returns:
        Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('TF record already initialized!')
    (su_len, su_entry_version_unused, self.time_flags,) = struct.unpack_from('=BBB', rrstr[:5], 2)
    if su_len < 5:
        raise pycdlibexception.PyCdlibInvalidISO('Not enough bytes in the TF record')
    # Bit 7 of the flags selects long-form (17-byte) timestamps;
    # otherwise each timestamp is a 7-byte directory-record date.
    tflen = 7
    if self.time_flags & (1 << 7):
        tflen = 17
    offset = 5
    # Each remaining flag bit corresponds, in FIELDNAMES order, to one
    # optional timestamp field present in the record.
    for index, fieldname in enumerate(self.FIELDNAMES):
        if self.time_flags & (1 << index):
            if tflen == 7:
                setattr(self, fieldname, dates.DirectoryRecordDate())
            elif tflen == 17:
                setattr(self, fieldname, dates.VolumeDescriptorDate())
            getattr(self, fieldname).parse(rrstr[offset:offset + tflen])
            offset += tflen
    self._initialized = True
Parse a Rock Ridge Time Stamp record out of a string. Parameters: rrstr - The string to parse the record out of. Returns: Nothing.
def end_experience_collection_timer(self):
    """Inform Metrics class that experience collection is done."""
    if self.time_start_experience_collection:
        elapsed = time() - self.time_start_experience_collection
        # Accumulate the delta across multiple collection windows.
        if self.delta_last_experience_collection is None:
            self.delta_last_experience_collection = elapsed
        else:
            self.delta_last_experience_collection += elapsed
    self.time_start_experience_collection = None
Inform Metrics class that experience collection is done.
def icon(self):
    """Gets the icon (read-only).

    Accepts a theme-name string, a (theme-name, fallback-path) tuple, or
    an already-constructed QIcon; anything else yields a null icon.
    """
    if isinstance(self._icon, str):
        # Prefer the themed icon when the theme provides one.
        if QtGui.QIcon.hasThemeIcon(self._icon):
            return QtGui.QIcon.fromTheme(self._icon)
        return QtGui.QIcon(self._icon)
    if isinstance(self._icon, tuple):
        theme_name = self._icon[0]
        fallback = QtGui.QIcon(self._icon[1])
        return QtGui.QIcon.fromTheme(theme_name, fallback)
    if isinstance(self._icon, QtGui.QIcon):
        return self._icon
    return QtGui.QIcon()
Gets the icon file name. Read-only.
def deserialize(data):
    """Deserialize `data` to an exception instance.

    If the `exc_path` value matches an exception registered as
    ``deserializable``, return an instance of that exception type.
    Otherwise, return a `RemoteError` instance describing the exception
    that occurred.
    """
    key = data.get('exc_path')
    if key in registry:
        # Registered exception: rebuild it from its original args.
        return registry[key](*data.get('exc_args', ()))
    return RemoteError(exc_type=data.get('exc_type'), value=data.get('value'))
Deserialize `data` to an exception instance. If the `exc_path` value matches an exception registered as ``deserializable``, return an instance of that exception type. Otherwise, return a `RemoteError` instance describing the exception that occurred.
def _get_running_config(self, split=True):
    """Get the IOS XE device's current running config.

    :param split: when True, return the config as a list of lines;
        otherwise return the raw multiline string.
    :return: current IOS running config
    """
    conn = self._get_connection()
    config = conn.get_config(source="running")
    if config:
        root = ET.fromstring(config._raw)
        running_config = root[0][0]
        if split is True:
            # Split on any run of CR/LF line endings.
            line_re = re.compile("\r*\n+")
            ioscfg = line_re.split(running_config.text)
        else:
            ioscfg = running_config.text
    # NOTE(review): if `config` is falsy, `ioscfg` is unbound and this
    # raises — behavior inherited from the original; confirm intended.
    return ioscfg
Get the IOS XE device's current running config. :return: Current IOS running config as multiline string
def _get_long_description(self): if self.description is None: return None lines = [x for x in self.description.split('\n')] if len(lines) == 1: return None elif len(lines) >= 3 and lines[1] == '': return '\n'.join(lines[2:]) return self.description
Return the subsequent lines of a multiline description Returns: string: The long description, otherwise None
def _default_key_normalizer(key_class, request_context): context = request_context.copy() context['scheme'] = context['scheme'].lower() context['host'] = context['host'].lower() for key in ('headers', '_proxy_headers', '_socks_options'): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) socket_opts = context.get('socket_options') if socket_opts is not None: context['socket_options'] = tuple(socket_opts) for key in list(context.keys()): context['key_' + key] = context.pop(key) for field in key_class._fields: if field not in context: context[field] = None return key_class(**context)
Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey
def load_module(prefix, epoch, data_names, data_shapes):
    """Load the model from the checkpoint specified by prefix and epoch,
    bind it to an executor, set its parameters, and return the
    mx.mod.Module."""
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    # Replace the training head with a plain softmax over pred_fc.
    pred_fc = sym.get_internals()['pred_fc_output']
    sym = mx.sym.softmax(data=pred_fc)
    mod = mx.mod.Module(symbol=sym, context=mx.cpu(),
                        data_names=data_names, label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes)
    mod.set_params(arg_params, aux_params, allow_missing=False)
    return mod
Loads the model from checkpoint specified by prefix and epoch, binds it to an executor, and sets its parameters and returns a mx.mod.Module
def check(self, spec, data):
    """Given a mongo-style spec and some data or python object, check
    whether the object complies with the spec. Fails eagerly.
    """
    path_eval = self.path_eval
    for keypath, specvalue in spec.items():
        if keypath.startswith('$'):
            # Operator clause ($and, $or, ...): dispatch on the operator,
            # checking the whole data object.
            results = self.dispatch_operator(keypath, specvalue, data)
        else:
            try:
                checkable = path_eval(data, keypath)
            except self.InvalidPath:
                # Missing path means the spec cannot be satisfied.
                return False
            results = self.dispatch_literal(specvalue, checkable)
        if not all(results):
            return False
    return True
Given a mongo-style spec and some data or python object, check whether the object complies with the spec. Fails eagerly.
def bounds(self):
    """Bounds of all actors present in the rendering window.

    Returns [xmin, xmax, ymin, ymax, zmin, zmax] aggregated over every
    actor that reports bounds (cube-axes actors and the bounding-box
    actor itself are skipped).
    """
    the_bounds = [np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf]

    def _update_bounds(bounds):
        # Expand the running bounds (mutated via closure) to include
        # this actor's bounds.
        def update_axis(ax):
            if bounds[ax*2] < the_bounds[ax*2]:
                the_bounds[ax*2] = bounds[ax*2]
            if bounds[ax*2+1] > the_bounds[ax*2+1]:
                the_bounds[ax*2+1] = bounds[ax*2+1]
        for ax in range(3):
            update_axis(ax)
        return

    for actor in self._actors.values():
        # Axes annotations would artificially inflate the bounds.
        if isinstance(actor, vtk.vtkCubeAxesActor):
            continue
        if (hasattr(actor, 'GetBounds') and actor.GetBounds() is not None
                and id(actor) != id(self.bounding_box_actor)):
            _update_bounds(actor.GetBounds())

    return the_bounds
Bounds of all actors present in the rendering window
def _bounds_from_array(arr, dim_name, bounds_name):
    """Get the bounds of an array given its center values.

    E.g. if lat-lon grid center lat/lon values are known, but not the
    bounds of each grid box. The algorithm assumes that the bounds are
    simply halfway between each pair of center values.
    """
    spacing = arr.diff(dim_name).values

    def _edge(sign):
        # Offset centers by half the (possibly non-uniform) spacing;
        # the last element reuses the final spacing value.
        edge = xr.DataArray(np.empty_like(arr), dims=arr.dims,
                            coords=arr.coords)
        edge.values[:-1] = arr.values[:-1] + sign * 0.5 * spacing
        edge.values[-1] = arr.values[-1] + sign * 0.5 * spacing[-1]
        return edge

    bounds = xr.concat([_edge(-1), _edge(1)], dim='bounds')
    return bounds.T
Get the bounds of an array given its center values. E.g. if lat-lon grid center lat/lon values are known, but not the bounds of each grid box. The algorithm assumes that the bounds are simply halfway between each pair of center values.
def cluster_application(self, application_id):
    """Fetch information about a particular application submitted to the
    cluster.

    :param str application_id: The application id
    :returns: API response object with JSON data
    """
    return self.request('/ws/v1/cluster/apps/{appid}'.format(appid=application_id))
An application resource contains information about a particular application that was submitted to a cluster. :param str application_id: The application id :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response`
def filter(self, info, releases):
    """Remove all release versions that match any of the specified
    patterns."""
    # Iterate over a snapshot of the keys since the dict is mutated.
    for version in list(releases.keys()):
        if any(pattern.match(version) for pattern in self.patterns):
            del releases[version]
Remove all release versions that match any of the specified patterns.
def _compute_std(self, C, stddevs, idx): for stddev in stddevs: stddev[idx] += C['sigma']
Compute total standard deviation, see tables 3 and 4, pages 227 and 228.
def _read_pretrained_embeddings_file(file_uri: str,
                                     embedding_dim: int,
                                     vocab: Vocabulary,
                                     namespace: str = "tokens") -> torch.FloatTensor:
    """Load pretrained embeddings from an hdf5 or a text file.

    Dispatches on the file extension: '.h5'/'.hdf5' is read as an hdf5
    tensor dump, anything else as (possibly archived/compressed) text.
    Returns a weight matrix of shape
    ``(vocab.get_vocab_size(namespace), embedding_dim)``.
    """
    if get_file_extension(file_uri) in ('.h5', '.hdf5'):
        return _read_embeddings_from_hdf5(file_uri, embedding_dim,
                                          vocab, namespace)
    return _read_embeddings_from_text_file(file_uri, embedding_dim,
                                           vocab, namespace)
Returns an embedding matrix for the given vocabulary using the pretrained embeddings contained in the given file. Embeddings for tokens not found in the pretrained embedding file are randomly initialized using a normal distribution with mean and standard deviation equal to those of the pretrained embeddings. We support two file formats: * text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ... The text file may be compressed, and may even reside in an archive with multiple files. If the file resides in an archive with other files, then ``embeddings_filename`` must be a URI "(archive_uri)#file_path_inside_the_archive" * hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor. If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume text format. Parameters ---------- file_uri : str, required. It can be: * a file system path or a URL of a possibly compressed text file or a zip/tar archive containing a single file. * URI of the type ``(archive_path_or_url)#file_path_inside_archive`` if the text file is contained in a multi-file archive. embedding_dim : int, required. The dimensionality of the embeddings to read. vocab : Vocabulary, required. A Vocabulary object. namespace : str, (optional, default=tokens) The namespace of the vocabulary to find pretrained embeddings for. trainable : bool, (optional, default=True) Whether or not the embedding parameters should be optimized. (NOTE(review): this function takes no ``trainable`` parameter — confirm whether it belongs to the caller.) Returns ------- A weight matrix with embeddings initialized from the read file. The matrix has shape ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in the pretrained embedding file are initialized to the pretrained embedding value.
def update_models(new_obj, current_table, tables, relations):
    """Fold ``new_obj`` (Table, Relation or Column) into the parsing state.

    Returns the updated ``(current_table, tables, relations)`` triple.
    Raises on duplicate names, unknown table references, or any other
    object type.
    """
    _update_check_inputs(current_table, tables, relations)
    _check_no_current_table(new_obj, current_table)

    if isinstance(new_obj, Table):
        existing = [t.name for t in tables]
        _check_not_creating_duplicates(new_obj.name, existing, 'table',
                                       DuplicateTableException)
        return new_obj, tables + [new_obj], relations

    if isinstance(new_obj, Relation):
        existing = [t.name for t in tables]
        # Both endpoints of a relation must reference known tables.
        _check_colname_in_lst(new_obj.right_col, existing)
        _check_colname_in_lst(new_obj.left_col, existing)
        return current_table, tables, relations + [new_obj]

    if isinstance(new_obj, Column):
        existing = [c.name for c in current_table.columns]
        _check_not_creating_duplicates(new_obj.name, existing, 'column',
                                       DuplicateColumnException)
        current_table.columns.append(new_obj)
        return current_table, tables, relations

    raise ValueError(
        "new_obj cannot be of type {}".format(new_obj.__class__.__name__))
Update the state of the parsing.
def compress(self):
    """Run compression by dispatching each AST token on its type.

    Tokens whose type has no dedicated handler fall back to the
    'default' handler.
    """
    handlers = self.dispatcher
    for token in self.ast_tokens:
        key = type(token)
        handler = handlers[key] if key in handlers else handlers['default']
        handler(token)
Main function of compression.
def vm_result_update(self, payload):
    """Record an agent-reported operation result in the VM database.

    ``payload`` must carry both 'port_id' and 'result'; otherwise the
    update is silently skipped.
    """
    port_id = payload.get('port_id')
    result = payload.get('result')
    if not (port_id and result):
        return
    self.update_vm_db(port_id, columns=dict(result=result))
Update the result field in VM database. This request comes from an agent that needs to update the result in VM database to success or failure to reflect the operation's result in the agent.
def asxc(cls, obj):
    """Convert ``obj`` into an ``Xcfunc``.

    Instances pass through unchanged and strings are resolved via
    ``cls.from_name``; any other type raises ``TypeError``.
    """
    if isinstance(obj, cls):
        return obj
    if not is_string(obj):
        raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj)))
    return cls.from_name(obj)
Convert object into Xcfunc.
def xml_records(filename):
    """Yield ``(xml, err)`` for every record in an EVTX file.

    ``xml`` is an ``etree.Element`` when parsing succeeded (``err`` is
    None), or the raw XML string when it failed (``err`` is the
    ``etree.XMLSyntaxError`` raised during parsing).

    @type filename: str
    @rtype: generator of (etree.Element or str), (None or Exception)
    """
    with Evtx(filename) as evtx:
        for xml, _record in evtx_file_xml_view(evtx.get_file_header()):
            try:
                parsed = to_lxml(xml)
            except etree.XMLSyntaxError as err:
                yield xml, err
            else:
                yield parsed, None
If the second return value is not None, then it is an Exception encountered during parsing. The first return value will be the XML string. @type filename str @rtype: generator of (etree.Element or str), (None or Exception)
def quit(self):
    """Log the user-requested abort and terminate with ``QUIT_RC``."""
    log = self.script.LOG
    log.warn("Abort due to user choice!")
    sys.exit(self.QUIT_RC)
Exit the program due to user's choices.
def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
    """Register the same transition for every symbol in an iterable.

    Any iterable of input symbols works — a plain string too, which is
    handy with string.digits, string.whitespace, string.letters, etc.
    When ``next_state`` is None the machine stays in ``state``; when
    ``action`` is None, process() only switches state.
    """
    target = state if next_state is None else next_state
    for symbol in list_input_symbols:
        self.add_transition(symbol, state, action, target)
This adds the same transition for a list of input symbols. You can pass a list or a string. Note that it is handy to use string.digits, string.whitespace, string.letters, etc. to add transitions that match character classes. The action may be set to None in which case the process() method will ignore the action and only set the next_state. The next_state may be set to None in which case the current state will be unchanged.
def get_compositions(self):
    """Gets all ``Compositions``.

    return: (osid.repository.CompositionList) - a list of ``Compositions``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('repository',
                                     collection='Composition',
                                     runtime=self._runtime)
    # Newest-first by ObjectId, wrapped in the OSID list type.
    cursor = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.CompositionList(cursor, runtime=self._runtime,
                                   proxy=self._proxy)
Gets all ``Compositions``. return: (osid.repository.CompositionList) - a list of ``Compositions`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def rainDelay(self, dev_id, duration):
    """Start a rain delay on device ``dev_id`` for ``duration`` via the API."""
    body = {'id': dev_id, 'duration': duration}
    return self.rachio.put('device/rain_delay', body)
Set a rain delay on the given device for the specified duration via the API.
def parse_impl(self):
    """Parses the HTML content as a stream.

    This is far less memory intensive than loading the entire HTML file
    into memory, like BeautifulSoup does.
    """
    # str() wrapper presumably keeps the encoding a native str under
    # Py2 unicode_literals — TODO confirm.
    parser = XMLParser(encoding=str('UTF-8'))
    # Incremental parse; note ``element_iter`` is handed to parse_thread()
    # below, which advances the same iterator past the thread's elements.
    element_iter = ET.iterparse(self.handle, events=("start", "end"), parser=parser)
    for pos, element in element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if tag == "h1" and pos == "end":
            # First <h1> in the export holds the archive owner's name;
            # keep only the first one seen.
            if not self.user:
                self.user = element.text.strip()
        elif tag == "div" and "thread" in class_attr and pos == "start":
            # A <div class="thread"> opens a conversation: read its
            # participants, then let parse_thread consume the iterator.
            participants = self.parse_participants(element)
            thread = self.parse_thread(participants, element_iter, True)
            self.save_thread(thread)
Parses the HTML content as a stream. This is far less memory intensive than loading the entire HTML file into memory, like BeautifulSoup does.
def items(self):
    """Iterate over the (word, count) pairs in the dictionary.

    Yields:
        str: The next word in the dictionary
        int: The number of instances of that word

    Note:
        This is the same as ``dict.items()`` on the underlying mapping.
    """
    # Delegate to the dict view: one lookup per entry instead of the
    # original key-iteration plus per-key indexing (two lookups each).
    yield from self._dictionary.items()
Iterator over the words in the dictionary Yields: str: The next word in the dictionary int: The number of instances in the dictionary Note: This is the same as `dict.items()`
def set_interactive_policy(*, locals=None, banner=None, serve=None,
                           prompt_control=None):
    """Install ``InteractiveEventLoopPolicy`` as the default asyncio policy."""
    asyncio.set_event_loop_policy(
        InteractiveEventLoopPolicy(
            locals=locals,
            banner=banner,
            serve=serve,
            prompt_control=prompt_control,
        )
    )
Use an interactive event loop by default.
def get_inner_fts(elt) -> List[str]:
    "List the inner functions of a class."
    prefix = elt.__name__
    found = []
    for attr_name in elt.__dict__:
        if attr_name.startswith('_'):
            continue
        member = getattr(elt, attr_name)
        if inspect.isfunction(member) or inspect.ismethod(member):
            found.append(f'{prefix}.{attr_name}')
        elif inspect.isclass(member):
            # Recurse into nested classes, qualifying with our own name.
            found.extend(f'{prefix}.{inner}' for inner in get_inner_fts(member))
    return found
List the inner functions of a class.
def _split_diff(merge_result, context_lines=3):
    """Split diff and context lines into groups based on None sentinels.

    Consecutive non-None items from ``_visible_in_diff`` are collected
    into lists; each None flushes the current (non-empty) group.  The
    final group is flushed even if the stream does not end with a
    sentinel.
    """
    group = []
    for item in _visible_in_diff(merge_result, context_lines=context_lines):
        if item is None:
            if group:
                yield group
                group = []
        else:
            group.append(item)
    # Fix: flush a trailing group not terminated by a sentinel; the
    # original silently dropped the last hunk in that case.  When the
    # stream does end with None this is a no-op.
    if group:
        yield group
Split diffs and context lines into groups based on None sentinel
def chk_qualifiers(self):
    """Validate that every association's Qualifier is a well-formed set."""
    if self.name == 'id2gos':
        # id2gos associations carry no qualifier information.
        return
    for assoc in self.associations:
        qualifier = assoc.Qualifier
        assert isinstance(qualifier, set), \
            '{NAME}: QUALIFIER MUST BE A LIST: {NT}'.format(NAME=self.name, NT=assoc)
        assert qualifier != set(['']), assoc
        assert qualifier != set(['-']), assoc
        assert 'always' not in qualifier, 'SPEC SAID IT WOULD BE THERE'
Check format of qualifier
def infer_enum(node, context=None):
    """Specific inference function for enum Call node."""
    # NOTE(review): extract_node is called with no source argument — the
    # enum-metaclass template string appears to have been lost from this
    # copy; verify against the original brain module before relying on it.
    enum_meta = extract_node(
    )
    # Build a synthetic class node for the enum and return an instance of it.
    class_node = infer_func_form(node, enum_meta, context=context, enum=True)[0]
    return iter([class_node.instantiate_class()])
Specific inference function for enum Call node.
def _post(self, q, payload='', params=''):
    """Generic POST wrapper including the api_key.

    Strips a trailing '/' from ``q``, posts ``payload`` as JSON and
    returns the decoded body wrapped in a DotDict.

    Raises:
        Exception: when the HTTP status is not OK or the body carries a
            true ``error`` flag; carries (url, reason, status, body).
    """
    if q[-1] == '/':
        q = q[:-1]
    headers = {'Content-Type': 'application/json'}
    r = requests.post(
        '{url}{q}?api_key={key}{params}'.format(
            url=self.url, q=q, key=self.api_key, params=params),
        headers=headers, data=payload)
    # Fix: decode the JSON body once; the original re-parsed it up to
    # three times (error check, exception payload, and return value).
    body = r.json()
    ret = DotDict(body)
    if not r.ok or ('error' in ret and ret.error == True):  # noqa: E712 — deliberate exact-True check
        raise Exception(r.url, r.reason, r.status_code, body)
    return ret
Generic POST wrapper including the api_key
def string_to_decimal(value, strict=True):
    """Return the decimal (float) represented by the string ``value``.

    @param value: a string representation of a decimal number.
    @param strict: indicate whether an undefined value is rejected
        (``True``) or converted to ``None`` (``False``).

    @return: the decimal value represented by the string, or ``None``
        when the value is undefined and ``strict`` is ``False``.

    @raise ValueError: if the value is undefined while ``strict`` is
        ``True``, or if the string doesn't represent a valid decimal.
    """
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None
    try:
        return float(value)
    except ValueError:
        # Fix: the original message said "integer" — copy-pasted from the
        # integer variant; this function parses decimal numbers.
        raise ValueError(
            'The specified string "%s" does not represent a decimal number'
            % value)
Return a decimal corresponding to the string representation of a number. @param value: a string representation of a decimal number. @param strict: indicate whether the specified string MUST represent a defined value; when ``False``, an undefined value yields ``None``. @return: the decimal value represented by the string. @raise ValueError: if the value is undefined while the argument ``strict`` equals ``True``, or if the string doesn't represent a valid decimal.
def phase_progeny_by_transmission(g):
    """Phase progeny genotypes from a trio or cross via Mendelian transmission.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, 2)
        Genotype array with the two parents in the first two columns and
        progeny in the remaining columns.

    Returns
    -------
    g : GenotypeArray, int8, shape (n_variants, n_samples, 2)
        Copy of the input with progeny phased where possible and the
        ``is_phased`` mask populated.
    """
    # Work on an int8 copy so the caller's array is never mutated.
    g = GenotypeArray(g, dtype='i1', copy=True)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)
    phased_mask = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(phased_mask).view(bool)
    return g
Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]])
def addReferenceSet(self, referenceSet):
    """Register ``referenceSet`` in this repository's lookup structures."""
    setId = referenceSet.getId()
    self._referenceSetIdMap[setId] = referenceSet
    self._referenceSetNameMap[referenceSet.getLocalId()] = referenceSet
    self._referenceSetIds.append(setId)
Adds the specified reference set to this data repository.
def add_subscription(self, channel, callback_function):
    """Add a channel to subscribe to and a callback function to run when
    the channel receives an update.

    If the channel already exists, another callback is simply appended
    to its subscription list.

    Args:
        channel (str): The channel to add a subscription to.
        callback_function (func): The function to run on an update to
            the passed-in channel.
    """
    if channel not in CHANNELS:
        CHANNELS.append(channel)
        SUBSCRIPTIONS[channel] = [callback_function]
    else:
        SUBSCRIPTIONS[channel].append(callback_function)
    # If the main subscribe() call already happened, start listening on
    # the channel right away rather than waiting for a re-subscribe.
    if self._subscribed:
        _LOGGER.info("New channel added after main subscribe call.")
        self._pubnub.subscribe().channels(channel).execute()
Add a channel to subscribe to and a callback function to run when the channel receives an update. If channel already exists, create a new "subscription" and append another callback function. Args: channel (str): The channel to add a subscription too. callback_function (func): The function to run on an update to the passed in channel.
def _debug_mode_responses(self, request, response):
    """Extra functionality available only when DEBUG_GMN is enabled.

    - ``pretty`` in the query string forces a text content type so the
      browser does not try to format the output.
    - With SQL profiling enabled (vendor header or setting), the
      response is replaced by per-query timing information.
    """
    if not django.conf.settings.DEBUG_GMN:
        return response
    if 'pretty' in request.GET:
        response['Content-Type'] = d1_common.const.CONTENT_TYPE_TEXT
    profile_sql = (
        'HTTP_VENDOR_PROFILE_SQL' in request.META
        or django.conf.settings.DEBUG_PROFILE_SQL
    )
    if profile_sql:
        lines = [
            '{}\n{}'.format(query['time'], query['sql'])
            for query in django.db.connection.queries
        ]
        return django.http.HttpResponse(
            '\n\n'.join(lines), d1_common.const.CONTENT_TYPE_TEXT
        )
    return response
Extra functionality available in debug mode. - If pretty printed output was requested, force the content type to text. This causes the browser to not try to format the output in any way. - If SQL profiling is turned on, return a page with SQL query timing information instead of the actual response.
def _start_monitoring(self):
    """Poll ``self.path`` once per second and fire create/delete/modify hooks.

    Runs forever.  Each iteration diffs the current timestamp snapshot
    against the previous one and dispatches the matching callbacks with
    the list of affected filenames.
    """
    # Assumes _file_timestamp_info maps filename -> mtime, matching the
    # original code's getmtime comparison — TODO confirm.
    before = self._file_timestamp_info(self.path)
    while True:
        gevent.sleep(1)
        after = self._file_timestamp_info(self.path)
        added = [fname for fname in after if fname not in before]
        removed = [fname for fname in before if fname not in after]
        # Fix: compare the two snapshots instead of re-statting each file
        # with os.path.getmtime — the original raced with files deleted
        # between the scan and the check, and paid an extra syscall each.
        modified = [
            fname for fname in before
            if fname in after and after[fname] != before.get(fname)
        ]
        if added:
            self.on_create(added)
        if removed:
            self.on_delete(removed)
        if modified:
            self.on_modify(modified)
        before = after
Internal method that monitors the directory for changes
def save_csv(self, filename, write_header_separately=True):
    """Save the default array as a CSV file.

    Writes an optional header row, one comma-joined line per data row
    (each cell passed through ``force_to_string``), and a trailing blank
    line, matching the historical output format.
    """
    with open(filename, "w") as out:
        if write_header_separately:
            out.write(','.join([c for c in self.header]) + '\n')
        for row in self.arr:
            out.write(','.join([self.force_to_string(col) for col in row]) + '\n')
        out.write('\n')
save the default array as a CSV file
def registered(self, socket_client):
    """Called when a controller is registered.

    Stores the socket client and selects the send function: platform
    messages for platform-targeted controllers, app messages otherwise.
    """
    self._socket_client = socket_client
    self._message_func = (
        socket_client.send_platform_message
        if self.target_platform
        else socket_client.send_app_message
    )
Called when a controller is registered.
def multi_select(self, elements_to_select):
    """Multi-select any number of elements.

    The last element is plain-clicked and every other element is
    multi-clicked, preserving the original pop()-based behaviour — but
    without mutating the caller's list.

    :param elements_to_select: list of WebElement instances
    :return: None
    """
    # Fix: the original popped from (and so mutated) the caller's list,
    # and carried an unused enumerate index.
    first_element = elements_to_select[-1]
    self.click(first_element)
    for element in elements_to_select[:-1]:
        self.multi_click(element)
Multi-select any number of elements. :param elements_to_select: list of WebElement instances :return: None
def sign_message(self, key, message, verbose=False):
    """Return a Base64-encoded signature over ``message``.

    Anyone holding the public key can verify it.  With ``verbose`` (and
    a non-None message) the signature is embedded in the armored
    signed-message template instead of returned bare.

    Raises:
        ValueError: if ``key`` carries no private (secret) component.
    """
    exponent = key.secret_exponent()
    if not exponent:
        raise ValueError("Private key is required to sign a message")
    address = key.address()
    digest = self.hash_for_signing(message)
    signature = self.signature_for_message_hash(
        exponent, digest, key.is_compressed())
    if verbose and message is not None:
        return self.signature_template.format(
            msg=message, sig=signature, addr=address,
            net_name=self._network_name.upper())
    return signature
Return a signature, encoded in Base64, which can be verified by anyone using the public key.
def custom_property_prefix_lax(instance):
    """Yield a warning for each custom property without an 'x_'-ish prefix.

    Only properties of known object types are checked; spec-defined and
    reserved property names are skipped, as is anything matching the lax
    prefix pattern.  Custom objects' own properties are not checked.
    """
    for prop_name in instance.keys():
        if instance['type'] not in enums.PROPERTIES:
            continue
        if prop_name in enums.PROPERTIES[instance['type']]:
            continue
        if prop_name in enums.RESERVED_PROPERTIES:
            continue
        if CUSTOM_PROPERTY_LAX_PREFIX_RE.match(prop_name):
            continue
        yield JSONError("Custom property '%s' should have a type that "
                        "starts with 'x_' in order to be compatible with "
                        "future versions of the STIX 2 specification."
                        % prop_name, instance['id'], 'custom-prefix-lax')
Ensure custom properties follow lenient naming style conventions for forward-compatibility. Does not check property names in custom objects.
def add_data_attribute(self, data_attr):
    """Interprets a DATA attribute and adds it to the datastream.

    Raises:
        DataStreamError: if the attribute is not a DATA attribute or
            belongs to a stream with a different name.
    """
    if data_attr.header.attr_type_id is not AttrTypes.DATA:
        raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes")
    if data_attr.header.attr_name != self.name:
        raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream")
    if data_attr.header.non_resident:
        # Non-resident data lives in data runs on disk; a large stream
        # can arrive split across several attribute fragments.
        nonr_header = data_attr.header
        if self._data_runs is None:
            self._data_runs = []
        # Track the highest VCN seen so far across all fragments.
        if nonr_header.end_vcn > self.cluster_count:
            self.cluster_count = nonr_header.end_vcn
        # Only the first fragment (start_vcn == 0) carries the stream's
        # real and allocated sizes.
        if not nonr_header.start_vcn:
            self.size = nonr_header.curr_sstream
            self.alloc_size = nonr_header.alloc_sstream
        self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs))
        # Fragments may arrive out of order, so flag the run list unsorted.
        self._data_runs_sorted = False
    else:
        # Resident data: the full content is stored inline in the attribute.
        self.size = self.alloc_size = data_attr.header.content_len
        self._pending_processing = None
        self._content = data_attr.content.content
Interprets a DATA attribute and adds it to the datastream.
def resolve(tex):
    """Resolve all imports and update the parse tree.

    Reads from a tex source (path handle, file object or string accepted
    by TexSoup) and recursively splices the contents of every
    \\subimport, \\import and \\include target in place of the command.
    """
    def _resolved_contents(path):
        # Fix: the original leaked one open() handle per included file;
        # close each file as soon as its subtree has been resolved.
        with open(path) as handle:
            return resolve(handle).contents

    soup = TexSoup(tex)
    for subimport in soup.find_all('subimport'):
        path = subimport.args[0] + subimport.args[1]
        subimport.replace_with(*_resolved_contents(path))
    for _import in soup.find_all('import'):
        _import.replace_with(*_resolved_contents(_import.args[0]))
    for include in soup.find_all('include'):
        include.replace_with(*_resolved_contents(include.args[0]))
    return soup
Resolve all imports and update the parse tree. Reads from a tex file and once finished, writes to a tex file.
def authorization_code_pkce(self, client_id, code_verifier, code,
                            redirect_uri, grant_type='authorization_code'):
    """Exchange an Authorization Code (with its PKCE verifier) for tokens.

    This is the OAuth 2.0 grant that mobile apps use to access an API.

    Args:
        client_id (str): your application's client Id
        code_verifier (str): cryptographically random key used to build
            the code_challenge passed to /authorize
        code (str): the Authorization Code received from /authorize
        redirect_uri (str): required only if it was set on the
            GET /authorize call; the values must match
        grant_type (str): the flow being used; for authorization code
            pkce use authorization_code

    Returns:
        access_token, id_token
    """
    token_url = 'https://{}/oauth/token'.format(self.domain)
    body = {
        'client_id': client_id,
        'code_verifier': code_verifier,
        'code': code,
        'grant_type': grant_type,
        'redirect_uri': redirect_uri,
    }
    return self.post(token_url, data=body,
                     headers={'Content-Type': 'application/json'})
Authorization code pkce grant This is the OAuth 2.0 grant that mobile apps utilize in order to access an API. Use this endpoint to exchange an Authorization Code for a Token. Args: grant_type (str): Denotes the flow you're using. For authorization code pkce use authorization_code client_id (str): your application's client Id code_verifier (str): Cryptographically random key that was used to generate the code_challenge passed to /authorize. code (str): The Authorization Code received from the /authorize Calls redirect_uri (str, optional): This is required only if it was set at the GET /authorize endpoint. The values must match Returns: access_token, id_token