code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _violinplot(val, shade, bw, ax, **kwargs_shade):
    """Draw a single violin for *val* on axis *ax*.

    A KDE of the values is mirrored around zero and filled with
    ``fill_betweenx`` so the result reads as a violin shape.
    """
    density, low_b, up_b = _fast_kde(val, bw=bw)
    grid = np.linspace(low_b, up_b, len(density))
    mirrored_grid = np.concatenate([grid, grid[::-1]])
    mirrored_density = np.concatenate([-density, density[::-1]])
    ax.fill_betweenx(mirrored_grid, mirrored_density, alpha=shade, lw=0, **kwargs_shade)
Auxiliary function to plot violinplots.
def assert_sympy_expressions_equal(expr1, expr2):
    """Raise ``AssertionError`` if *expr1* is not equal to *expr2*.

    :param expr1: first expression
    :param expr2: second expression
    :return: None
    """
    if sympy_expressions_equal(expr1, expr2):
        return
    raise AssertionError("{0!r} != {1!r}".format(expr1, expr2))
Raises `AssertionError` if `expr1` is not equal to `expr2`. :param expr1: first expression :param expr2: second expression :return: None
def record(self): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor not initialized') rec = struct.pack(self.FMT, b'\x00' * 16, self.vol_desc_seqnum, self.desc_char_set, self.logical_vol_id...
A method to generate the string representing this UDF Logical Volume Descriptor. Parameters: None. Returns: A string representing this UDF Logical Volume Descriptor.
def initialize(self): if not self._initialized: logger.info("initializing %r", self) if not os.path.exists(self.path): if self.mode is not None: os.makedirs(self.path, mode=self.mode) else: os.makedirs(self.path) ...
create the directory if needed and configure it :return: None
def check_address(address): if isinstance(address, tuple): check_host(address[0]) check_port(address[1]) elif isinstance(address, string_types): if os.name != 'posix': raise ValueError('Platform does not support UNIX domain sockets') if not (os.path.exists(address) or...
Check if the format of the address is correct Arguments: address (tuple): (``str``, ``int``) representing an IP address and port, respectively .. note:: alternatively a local ``address`` can be a ``str`` when working with UNIX domain sock...
def argsort_indices(a, axis=-1):
    """Like ``argsort``, but return an index tuple usable for sorting.

    The returned tuple can fancy-index the original array, even when
    that array is multidimensional.
    """
    arr = np.asarray(a)
    # Open-mesh indices for every axis, then replace the sorted axis
    # with the argsort permutation.
    index = list(np.ix_(*(np.arange(n) for n in arr.shape)))
    index[axis] = arr.argsort(axis)
    return tuple(index)
Like argsort, but returns an index suitable for sorting the original array even if that array is multidimensional
def pick_frequency_line(self, filename, frequency, cumulativefield='cumulative_frequency'): if resource_exists('censusname', filename): with closing(resource_stream('censusname', filename)) as b: g = codecs.iterdecode(b, 'ascii') return self._pick_frequency_line(g, fr...
Given a numeric frequency, pick a line from a csv with a cumulative frequency field
def chat_delete(self, *, channel: str, ts: str, **kwargs) -> SlackResponse:
    """Deletes a message.

    Args:
        channel (str): Channel containing the message to be deleted.
            e.g. 'C1234567890'
        ts (str): Timestamp of the message to be deleted.
            e.g. '1234567890.123456'
    """
    payload = dict(kwargs)
    payload["channel"] = channel
    payload["ts"] = ts
    return self.api_call("chat.delete", json=payload)
Deletes a message. Args: channel (str): Channel containing the message to be deleted. e.g. 'C1234567890' ts (str): Timestamp of the message to be deleted. e.g. '1234567890.123456'
def match_rule_patterns(fixed_text, cur=0): pattern = exact_find_in_pattern(fixed_text, cur, RULE_PATTERNS) if len(pattern) > 0: return {"matched": True, "found": pattern[0]['find'], "replaced": pattern[0]['replace'], "rules": pattern[0]['rules']} else: return {"matched": Fal...
Matches given text at cursor position with rule patterns Returns a dictionary of four elements: - "matched" - Bool: depending on if match found - "found" - string/None: Value of matched pattern's 'find' key or none - "replaced": string Replaced string if match found else input string at cursor ...
def _get_all_filtered_channels(self, topics_without_signature): mpe_address = self.get_mpe_address() event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex() topics = [event_signature] + topics_without_signature l...
Get all filtered channels from blockchain logs.
def convert_mask_to_pil(mask, real=True): from PIL import Image header = mask._layer._psd._record.header channel_ids = [ci.id for ci in mask._layer._record.channel_info] if real and mask._has_real(): width = mask._data.real_right - mask._data.real_left height = mask._data.real_bottom - m...
Convert Mask to PIL Image.
def initialize(): from zsl.interface.web.performers.default import create_not_found_mapping from zsl.interface.web.performers.resource import create_resource_mapping create_not_found_mapping() create_resource_mapping()
Import in this form is necessary so that we avoid the unwanted behavior and immediate initialization of the application objects. This makes the initialization procedure run in the time when it is necessary and has every required resources.
def roman2int(s): val = 0 pos10 = 1000 beg = 0 for pos in range(3, -1, -1): for digit in range(9,-1,-1): r = roman[pos][digit] if s.startswith(r, beg): beg += len(r) val += digit * pos10 break pos10 //= 10 return...
Decode roman number :param s: string representing a roman number between 1 and 9999 :returns: the decoded roman number :complexity: linear (if that makes sense for constant bounded input size)
def list_accounts(self, id, max_id=None, min_id=None, since_id=None, limit=None): id = self.__unpack_id(id) if max_id != None: max_id = self.__unpack_id(max_id) if min_id != None: min_id = self.__unpack_id(min_id) if since_id != None: since_id = self._...
Get the accounts that are on the given list. A `limit` of 0 can be specified to get all accounts without pagination. Returns a list of `user dicts`_.
def diff_safe(cls, value):
    """Return a version of *value* that can be safely stored as a diff.

    Frames are summarized to a repr string plus id; lists and tuples
    are converted element-wise; everything else passes through.
    """
    if isinstance(value, Frame):
        return {'_str': str(value), '_id': value._id}
    if isinstance(value, (list, tuple)):
        return [cls.diff_safe(item) for item in value]
    return value
Return a value that can be safely stored as a diff
def calculated_intervals(self, value): if not value: self._calculated_intervals = TimeIntervals() return if isinstance(value, TimeInterval): value = TimeIntervals([value]) elif isinstance(value, TimeIntervals): pass elif isinstance(value, l...
Set the calculated intervals This will be written to the stream_status collection if it's in the database channel :param value: The calculated intervals :type value: TimeIntervals, TimeInterval, list[TimeInterval]
def export_json(self, filename):
    """Export the graph in JSON form to *filename* (UTF-8 encoded)."""
    payload = self.to_json().encode('utf-8')
    with open(filename, 'wb') as out:
        out.write(payload)
Export graph in JSON form to the given file.
def get_banks_by_assessment_part(self, assessment_part_id):
    """Get the ``Banks`` mapped to an ``AssessmentPart``.

    arg:    assessment_part_id (osid.id.Id): ``Id`` of an ``AssessmentPart``
    return: (osid.assessment.BankList) - list of banks
    """
    mgr = self._get_provider_manager('ASSESSMENT', local=True)
    lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
    bank_ids = self.get_bank_ids_by_assessment_part(assessment_part_id)
    return lookup_session.get_banks_by_ids(bank_ids)
Gets the ``Banks`` mapped to an ``AssessmentPart``. arg: assessment_part_id (osid.id.Id): ``Id`` of an ``AssessmentPart`` return: (osid.assessment.BankList) - list of banks raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part...
def adjust_opts(in_opts, config): memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg...
Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively.
def _handle_sigusr1(signum: int, frame: Any) -> None: print('=' * 70) print(''.join(traceback.format_stack())) print('-' * 70)
Print stacktrace.
def do_fish_complete(cli, prog_name): commandline = os.environ['COMMANDLINE'] args = split_args(commandline)[1:] if args and not commandline.endswith(' '): incomplete = args[-1] args = args[:-1] else: incomplete = '' for item, help in get_choices(cli, prog_name, args, incompl...
Do the fish completion Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line Returns ------- bool True if the completion was successful, False otherwise
def opath_from_ext(self, ext):
    """Return the path of the output file with extension *ext*.

    Use it when the file does not exist yet.
    """
    stem = self.prefix.odata + "_" + ext
    return os.path.join(self.workdir, stem)
Returns the path of the output file with extension ext. Use it when the file does not exist yet.
def removeTags(dom): try: string_type = basestring except NameError: string_type = str element_stack = None if type(dom) in [list, tuple]: element_stack = dom elif isinstance(dom, HTMLElement): element_stack = dom.childs if dom.isTag() else [dom] elif isinstance(d...
Remove all tags from `dom` and obtain plaintext representation. Args: dom (str, obj, array): str, HTMLElement instance or array of elements. Returns: str: Plain string without tags.
def upload(ctx): settings.add_cli_options(ctx.cli_options, settings.TransferAction.Upload) ctx.initialize(settings.TransferAction.Upload) specs = settings.create_upload_specifications( ctx.cli_options, ctx.config) del ctx.cli_options for spec in specs: blobxfer.api.Uploader( ...
Upload files to Azure Storage
def get_sub_dim(src_ds, scale=None, maxdim=1024): ns = src_ds.RasterXSize nl = src_ds.RasterYSize maxdim = float(maxdim) if scale is None: scale_ns = ns/maxdim scale_nl = nl/maxdim scale = max(scale_ns, scale_nl) if scale > 1: ns = int(round(ns/scale)) nl = in...
Compute dimensions of subsampled dataset Parameters ---------- ds : gdal.Dataset Input GDAL Dataset scale : int, optional Scaling factor maxdim : int, optional Maximum dimension along either axis, in pixels Returns ------- ns Number of samples in s...
def create_inline(project, resource, offset): pyname = _get_pyname(project, resource, offset) message = 'Inline refactoring should be performed on ' \ 'a method, local variable or parameter.' if pyname is None: raise rope.base.exceptions.RefactoringError(message) if isinstance(pyna...
Create a refactoring object for inlining Based on `resource` and `offset` it returns an instance of `InlineMethod`, `InlineVariable` or `InlineParameter`.
def cli_run():
    """Parse command-line options and run the daemon with them."""
    opts = CLI.parse_args()
    run(opts.CONFIGURATION, opts.log_level, opts.log_target, opts.log_journal)
Run the daemon from a command line interface
def main(arguments=None): if arguments is None: arguments = sys.argv[1:] server_parameters = get_server_parameters(arguments) config = get_config(server_parameters.config_path, server_parameters.use_environment) configure_log(config, server_parameters.log_level.upper()) validate_config(confi...
Runs thumbor server with the specified arguments.
def beginning_of_line(event):
    "Move to the start of the current line."
    buff = event.current_buffer
    offset = buff.document.get_start_of_line_position(after_whitespace=False)
    buff.cursor_position += offset
Move to the start of the current line.
async def pong(self, data: bytes = b"") -> None:
    """Send a pong frame.

    An unsolicited pong may serve as a unidirectional heartbeat.
    ``data`` may be a string (encoded to UTF-8) or a bytes-like object.
    """
    await self.ensure_open()
    payload = encode_data(data)
    await self.write_frame(True, OP_PONG, payload)
This coroutine sends a pong. An unsolicited pong may serve as a unidirectional heartbeat. The content may be overridden with the optional ``data`` argument which must be a string (which will be encoded to UTF-8) or a bytes-like object.
def update(self, cur_value, mesg=None): self.cur_value = cur_value progress = float(self.cur_value) / self.max_value num_chars = int(progress * self.max_chars) num_left = self.max_chars - num_chars if mesg is not None: self.mesg = mesg bar = self.template.form...
Update progressbar with current value of process Parameters ---------- cur_value : number Current value of process. Should be <= max_value (but this is not enforced). The percent of the progressbar will be computed as (cur_value / max_value) * 100 m...
def _create_row_labels(self): labels = {} for c in self._columns: labels[c] = c if self._alt_labels: for k in self._alt_labels.keys(): labels[k] = self._alt_labels[k] if self._label_suffix: for k in labels.keys(): if k i...
Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label.
async def select(self, db):
    """Change the db index for all free connections.

    All previously acquired connections will be closed when released.
    """
    ok = True
    async with self._cond:
        for idx in range(self.freesize):
            # `and` short-circuits: once a select fails, the rest are skipped.
            ok = ok and (await self._pool[idx].select(db))
        # Loop completed (no break), record the new db index.
        self._db = db
    return ok
Changes db index for all free connections. All previously acquired connections will be closed when released.
def matrix(fasta_path: 'path to tictax annotated fasta input', scafstats_path: 'path to BBMap scaftstats file'):
    """Generate a taxonomic count matrix from tictax classified contigs."""
    contigs = SeqIO.parse(fasta_path, 'fasta')
    counts = tictax.matrix(contigs, scafstats_path)
    counts.to_csv(sys.stdout)
Generate taxonomic count matrix from tictax classified contigs
def randdomain(self):
    """Return a randomized domain-like name (one or two dotted labels)."""
    label_count = self.random.randint(1, 2)
    labels = (
        rand_readable(3, 6, use=self.random, density=3)
        for _ in range(label_count)
    )
    return '.'.join(labels).lower()
-> a randomized domain-like name
def allocate_ip_for_subnet(self, subnet_id, mac, port_id):
    """Allocate an IP from the specified subnet and create a port."""
    subnet = self.get_subnet(subnet_id)
    ip, mask, new_port_id = self.a10_allocate_ip_from_dhcp_range(
        subnet, "vlan", mac, port_id)
    return ip, mask, new_port_id
Allocates an IP from the specified subnet and creates a port
def transformer(self): ttype = self.embedding.lower() if ttype == 'mds': return MDS(n_components=2, random_state=self.random_state) if ttype == 'tsne': return TSNE(n_components=2, random_state=self.random_state) raise YellowbrickValueError("unknown embedding '{}'"...
Creates the internal transformer that maps the cluster center's high dimensional space to its two dimensional space.
def add_resource_types(resource_i, types): if types is None: return [] existing_type_ids = [] if resource_i.types: for t in resource_i.types: existing_type_ids.append(t.type_id) new_type_ids = [] for templatetype in types: if templatetype.id in existing_type_ids: ...
Save a reference to the types used for this resource. @returns a list of type_ids representing the type ids on the resource.
def get_descriptor_output(descriptor, key, handler=None): line = 'stub' lines = '' while line != '': try: line = descriptor.readline() lines += line except UnicodeDecodeError: error_msg = "Error while decoding output of process {}".format(key) ...
Get the descriptor output and handle incorrect UTF-8 encoding of subprocess logs. In case an process contains valid UTF-8 lines as well as invalid lines, we want to preserve the valid and remove the invalid ones. To do this we need to get each line and check for an UnicodeDecodeError.
def abstracts(self, key, value):
    """Populate the ``abstracts`` key."""
    source = force_single_element(value.get('9'))
    return [
        {'source': source, 'value': item}
        for item in force_list(value.get('a'))
    ]
Populate the ``abstracts`` key.
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
    """Return the URI for the editor interface."""
    if environment_id is not None:
        environment_segment = '/environments/{0}'.format(environment_id)
    else:
        environment_segment = ''
    return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
        space_id, environment_segment, content_type_id)
Returns the URI for the editor interface.
def get_all(self, name, default=None):
    """Return every value for header *name*, or *default* when absent.

    Python 3 replacement for ``getheaders``.
    """
    fallback = [] if default is None else default
    return self._headers.get_list(name) or fallback
Python 3 replacement for ``getheaders`` (used by the cookie handling code); use this to fetch all values for a header.
def columnCount(self, parent):
    """Return the number of columns for the children of *parent*."""
    if not parent.isValid():
        return self.root.columnCount()
    return parent.internalPointer().columnCount()
Returns the number of columns for the children of the given parent.
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir): if not path or not isinstance(path, str): return path upload_path = None if upload_path_mapping.get(path) is not None and os.path.isfile(path): upload_path = upload_path_mapping[path] else: paths_to_che...
Check if `path` is a work-rooted path, and convert to a relative final-rooted path
def delete_all_thumbnails(path, recursive=True):
    """Delete all files within *path* matching the thumbnails pattern.

    By default matching files from all sub-directories are also removed;
    pass ``recursive=False`` to only remove from *path* itself.

    Returns the total number of files deleted.
    """
    thumbs_by_source = all_thumbnails(path, recursive=recursive)
    return sum(_delete_using_thumbs_list(thumbs)
               for thumbs in thumbs_by_source.values())
Delete all files within a path which match the thumbnails pattern. By default, matching files from all sub-directories are also removed. To only remove from the path directory, set recursive=False.
def encode_csv(data_dict, column_names):
    """Build a csv string.

    Args:
        data_dict: dict of {column_name: 1 value}
        column_names: list of column names; controls the output order.

    Returns:
        A csv string version of data_dict (no trailing line terminator).

    Raises:
        KeyError: if a name in *column_names* is missing from *data_dict*.
    """
    # Local imports, matching the original style of this function.
    # `six.StringIO` was a needless third-party shim; `io.StringIO`
    # is its exact Python 3 equivalent.
    import csv
    import io

    values = [str(data_dict[x]) for x in column_names]
    str_buff = io.StringIO()
    # lineterminator='' so the result is a bare row without a newline.
    writer = csv.writer(str_buff, lineterminator='')
    writer.writerow(values)
    return str_buff.getvalue()
Builds a csv string. Args: data_dict: dict of {column_name: 1 value} column_names: list of column names Returns: A csv string version of data_dict
def e2dnde_deriv(self, x, params=None):
    """Evaluate the derivative of E^2 times differential flux w.r.t. E."""
    if params is None:
        params = self.params
    deriv = self.eval_e2dnde_deriv(x, params, self.scale, self.extra_params)
    return np.squeeze(deriv)
Evaluate derivative of E^2 times differential flux with respect to E.
def local_attr(self, name, context=None): result = [] if name in self.locals: result = self.locals[name] else: class_node = next(self.local_attr_ancestors(name, context), None) if class_node: result = class_node.locals[name] result = [n...
Get the list of assign nodes associated to the given name. Assignments are looked for in both this class and in parents. :returns: The list of assignments to the given name. :rtype: list(NodeNG) :raises AttributeInferenceError: If no attribute with this name can be found i...
def encoded_content(self, path):
    """Return the base64-encoded contents of *path*, cached per class."""
    cache = self.__class__.asset_contents
    if path not in cache:
        data = self.read_bytes(path)
        cache[path] = force_text(base64.b64encode(data))
    return cache[path]
Return the base64 encoded contents
def path_join(*args):
    """Join path parts to a single SEP-delimited path.

    None and empty parts are dropped; the result has no leading or
    trailing separator.
    """
    parts = (part for part in args if part not in (None, ''))
    return SEP.join(parts).strip(SEP)
Join path parts to single path.
def copy(self, version=None, tx_ins=None, tx_outs=None, lock_time=None, tx_joinsplits=None, joinsplit_pubkey=None, joinsplit_sig=None): return SproutTx( version=version if version is not None else self.version, tx_ins=tx_ins if tx_ins is not None else self.tx_ins, ...
SproutTx, ... -> Tx Makes a copy. Allows over-writing specific pieces.
def controldata(self): result = {} if self._version_file_exists() and self.state != 'creating replica': try: env = {'LANG': 'C', 'LC_ALL': 'C', 'PATH': os.getenv('PATH')} if os.getenv('SYSTEMROOT') is not None: env['SYSTEMROOT'] = os.getenv...
return the contents of pg_controldata, or non-True value if pg_controldata call failed
def segment_snrs(filters, stilde, psd, low_frequency_cutoff): snrs = [] norms = [] for bank_template in filters: snr, _, norm = matched_filter_core( bank_template, stilde, h_norm=bank_template.sigmasq(psd), psd=None, low_frequency_cutoff=low_frequency_cutoff) ...
This functions calculates the snr of each bank veto template against the segment Parameters ---------- filters: list of FrequencySeries The list of bank veto templates filters. stilde: FrequencySeries The current segment of data. psd: FrequencySeries low_frequency_cutoff: fl...
def _calculate_average(self, points): assert len(self.theta) == len(points), \ "points has length %i, but should have length %i" % \ (len(points), len(self.theta)) new_point = {'x': 0, 'y': 0, 'time': 0} for key in new_point: new_point[key] = self.theta[0] * p...
Calculate the arithmetic mean of the points' x and y coordinates separately.
def fit(self, blocks, y=None):
    """Fit a k-means clustering model on an ordered sequence of blocks."""
    features = make_weninger_features(blocks)
    self.kmeans.fit(features)
    # Order the cluster centers, then pin the first one at the origin.
    self.kmeans.cluster_centers_.sort(axis=0)
    self.kmeans.cluster_centers_[0, :] = np.zeros(2)
    return self
Fit a k-means clustering model using an ordered sequence of blocks.
def parse_boolean(value): if value is None: return None if isinstance(value, bool): return value if isinstance(value, string_types): value = value.lower() if value == 'false': return False if value == 'true': return True raise ValueError("C...
Coerce a value to boolean. :param value: the value, could be a string, boolean, or None :return: the value as coerced to a boolean
def file_length(file_obj):
    """Return the length in bytes of *file_obj*.

    Works on file-like objects (e.g. StringIO) where ``os.fstat`` would
    fail. The stream is left rewound to position 0.
    """
    file_obj.seek(0, 2)  # seek to end
    size = file_obj.tell()
    file_obj.seek(0)  # rewind for subsequent readers
    return size
Returns the length in bytes of a given file object. Necessary because os.fstat only works on real files and not file-like objects. This works on more types of streams, primarily StringIO.
def start(self, level="WARN"):
    """Start logging with this logger.

    Until the logger is started, no messages are emitted; this applies
    to all loggers with the same name and any child loggers. Messages
    below *level* are ignored (default 'WARN', per *nix convention).
    """
    if self.active:
        return
    stream_handler = StreamHandler()
    stream_handler.setFormatter(Formatter(self.LOGFMT))
    self.addHandler(stream_handler)
    self.setLevel(level.upper())
    self.active = True
Start logging with this logger. Until the logger is started, no messages will be emitted. This applies to all loggers with the same name and any child loggers. Messages less than the given priority level will be ignored. The default level is 'WARN', which conforms to the *nix conventio...
def get(self, key, default=None, type=None):
    """Return the first value for *key*.

    If *type* is not None the value is converted by calling ``type``
    with the value; a ``ValueError`` from the conversion is treated as
    a missing key and *default* is returned instead.
    """
    try:
        value = self[key]
        return value if type is None else type(value)
    except (KeyError, ValueError):
        return default
Returns the first value for a key. If `type` is not None, the value will be converted by calling `type` with the value as argument. If type() raises `ValueError`, it will be treated as if the value didn't exist, and `default` will be returned instead.
def from_file(cls, filename): with open(filename) as f: molecule, origin, axes, nrep, subtitle, nuclear_charges = \ read_cube_header(f) data = np.zeros(tuple(nrep), float) tmp = data.ravel() counter = 0 while True: line ...
Create a cube object by loading data from a file. *Arguemnts:* filename The file to load. It must contain the header with the description of the grid and the molecule.
def _loh_to_vcf(cur): cn = int(float(cur["C"])) minor_cn = int(float(cur["M"])) if cur["type"].find("LOH"): svtype = "LOH" elif cn > 2: svtype = "DUP" elif cn < 1: svtype = "DEL" else: svtype = None if svtype: info = ["SVTYPE=%s" % svtype, "END=%s" % c...
Convert LOH output into standardized VCF.
def _parse_rule(self, rule): values = rule.strip().split(self.RULE_DELIM, 4) if len(values) >= 4: codes = values[3].split(',') for i in range(0, len(codes)): try: codes[i] = int(codes[i], 0) except ValueError as e: ...
Parses an extraction rule. @rule - Rule string. Returns an array of ['<case insensitive matching string>', '<file extension>', '<command to run>', '<comma separated return codes>', <recurse into extracted directories: True|False>].
def db_for_write(self, model, **hints):
    """Prevent write actions on read-only tables.

    Raises:
        WriteNotSupportedError: if ``model.sf_access`` is ``READ_ONLY``.
    """
    # Keep the try body minimal: the original wrapped the raise too, so
    # an AttributeError raised while formatting the error message would
    # have been silently swallowed.
    try:
        is_read_only = model.sf_access == READ_ONLY
    except AttributeError:
        # Models without sf_access are assumed writable.
        return None
    if is_read_only:
        raise WriteNotSupportedError("%r is a read-only model." % model)
    return None
Prevent write actions on read-only tables. Raises: WriteNotSupportedError: If models.sf_access is ``read_only``.
def basic_filter_languages(languages, ranges): if LanguageRange.WILDCARD in ranges: yield from languages return found = set() for language_range in ranges: range_str = language_range.match_str for language in languages: if language in found: contin...
Filter languages using the string-based basic filter algorithm described in RFC4647. `languages` must be a sequence of :class:`LanguageTag` instances which are to be filtered. `ranges` must be an iterable which represent the basic language ranges to filter with, in priority order. The language ran...
def _calculateCrcString(inputstring):
    """Calculate CRC-16 for Modbus.

    Args:
        inputstring (str): An arbitrary-length message (without the CRC).

    Returns:
        A two-byte CRC string, least significant byte first.
    """
    _checkString(inputstring, description='input CRC string')
    register = 0xFFFF
    for char in inputstring:
        table_index = (register ^ ord(char)) & 0xFF
        register = (register >> 8) ^ _CRC16TABLE[table_index]
    return _numToTwoByteString(register, LsbFirst=True)
Calculate CRC-16 for Modbus. Args: inputstring (str): An arbitrary-length message (without the CRC). Returns: A two-byte CRC string, where the least significant byte is first.
def train_agent(real_env, learner, world_model_dir, hparams, epoch): initial_frame_chooser = rl_utils.make_initial_frame_chooser( real_env, hparams.frame_stack_size, hparams.simulation_random_starts, hparams.simulation_flip_first_random_for_beginning ) env_fn = rl.make_simulated_env_fn_from_hparams( ...
Train the PPO agent in the simulated environment.
def _handle_usecols(self, columns, usecols_key): if self.usecols is not None: if callable(self.usecols): col_indices = _evaluate_usecols(self.usecols, usecols_key) elif any(isinstance(u, str) for u in self.usecols): if len(columns) > 1: ...
Sets self._col_indices usecols_key is used if there are string usecols.
def resolve_push_to(push_to, default_url, default_namespace): protocol = 'http://' if push_to.startswith('http://') else 'https://' url = push_to = REMOVE_HTTP.sub('', push_to) namespace = default_namespace parts = url.split('/', 1) special_set = {'.', ':'} char_set = set([c for c in parts[0]]) ...
Given a push-to value, return the registry and namespace. :param push_to: string: User supplied --push-to value. :param default_url: string: Container engine's default_index value (e.g. docker.io). :return: tuple: registry_url, namespace
def MessageEncoder(field_number, is_repeated, is_packed): tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: write(tag) local_EncodeVari...
Returns an encoder for a message field.
def load_path(self, path): containing_module, _, last_item = path.rpartition('.') if last_item[0].isupper(): path = containing_module imported_obj = importlib.import_module(path) if last_item[0].isupper(): try: imported_obj = getattr(imported_obj, ...
Load and return a given import path to a module or class
def get_id(self):
    """Return the term identifier.

    @rtype: string
    @return: the term identifier
    """
    # NAF stores the id under 'id', KAF under 'mid'.
    attr_by_format = {'NAF': 'id', 'KAF': 'mid'}
    attr = attr_by_format.get(self.type)
    if attr is not None:
        return self.node.get(attr)
Returns the term identifier @rtype: string @return: the term identifier
def nunique(expr): output_type = types.int64 if isinstance(expr, SequenceExpr): return NUnique(_value_type=output_type, _inputs=[expr]) elif isinstance(expr, SequenceGroupBy): return GroupedNUnique(_data_type=output_type, _inputs=[expr.to_column()], _grouped=expr.input) elif isinstance(e...
The distinct count. :param expr: :return:
def buscar_por_ip_ambiente(self, ip, id_environment): if not is_valid_int_param(id_environment): raise InvalidParameterError( u'Environment identifier is invalid or was not informed.') if not is_valid_ip(ip): raise InvalidParameterError(u'IP is invalid or was not ...
Get IP with an associated environment. :param ip: IP address in the format x1.x2.x3.x4. :param id_environment: Identifier of the environment. Integer value and greater than zero. :return: Dictionary with the following structure: :: {'ip': {'id': < id >, 'id_vl...
def swipe(self):
    """Return a new WBinArray with this array's bits in reverse order.

    Bits that had a greater index get a lesser index, and vice-versa.
    This array itself is left unmodified.
    """
    length = len(self)
    mirrored = WBinArray(0, length)
    for i in range(length):
        mirrored[length - i - 1] = self[i]
    return mirrored
Mirror current array value in reverse. Bits that had greater index will have lesser index, and vice-versa. This method doesn't change this array. It creates a new one and return it as a result. :return: WBinArray
def interpolate(self, other, t):
    """Create a new vertex between this one and *other* by linear
    interpolation with parameter *t*.

    Subclasses should override this to interpolate additional properties.
    """
    blended_pos = self.pos.lerp(other.pos, t)
    blended_normal = self.normal.lerp(other.normal, t)
    return Vertex(blended_pos, blended_normal)
Create a new vertex between this vertex and `other` by linearly interpolating all properties using a parameter of `t`. Subclasses should override this to interpolate additional properties.
def visit_with(self, node):
    """Return an astroid.With node as a string."""
    parts = []
    for expr, vars in node.items:
        rendered = "%s" % expr.accept(self)
        if vars:
            rendered += " as %s" % (vars.accept(self))
        parts.append(rendered)
    items = ", ".join(parts)
    return "with %s:\n%s" % (items, self._stmt_list(node.body))
return an astroid.With node as string
def upload_module(self, local_path=None, remote_path="/tmp/lime.ko"):
    """Upload LiME kernel module to remote host.

    :type local_path: str
    :param local_path: local path to lime kernel module
    :type remote_path: str
    :param remote_path: remote path to upload lime kernel module

    :raises FileNotFoundError: if *local_path* is not provided
    """
    if local_path is None:
        # Bug fix: the original raised the misspelled name
        # 'FileNotFoundFoundError', which was itself a NameError at runtime.
        raise FileNotFoundError(local_path)
    self.shell.upload_file(local_path, remote_path)
Upload LiME kernel module to remote host :type local_path: str :param local_path: local path to lime kernel module :type remote_path: str :param remote_path: remote path to upload lime kernel module
def _check_and_uninstall_python(ret, python, user=None): ret = _python_installed(ret, python, user=user) if ret['result']: if ret['default']: __salt__['pyenv.default']('system', runas=user) if __salt__['pyenv.uninstall_python'](python, runas=user): ret['result'] = True ...
Verify that python is uninstalled
def list(self, environment_vip=None):
    """List IPv6 networks.

    :param environment_vip: environment vip to filter by
    :return: IPv6 networks
    """
    query = ''
    if environment_vip:
        query = 'environment_vip=%s' % environment_vip
    uri = 'api/networkv6/?' + query
    return super(ApiNetworkIPv6, self).get(uri)
List IPv6 networks :param environment_vip: environment vip to filter :return: IPv6 Networks
def flattenPorts(root: LNode):
    """Flatten ports of every child to simplify layout generation.

    :attention: children property is destroyed, parent property stays same
    """
    for child in root.children:
        for side in ("west", "east", "north", "south"):
            setattr(child, side, _flattenPortsSide(getattr(child, side)))
Flatten ports to simplify layout generation :attention: children property is destroyed, parent property stays same
def get_hkr_state(self): self.update() try: return { 126.5: 'off', 127.0: 'on', self.eco_temperature: 'eco', self.comfort_temperature: 'comfort' }[self.target_temperature] except KeyError: return ...
Get the thermostate state.
def exists(self, filename):
    """Report whether *filename* exists on all distribution points.

    Args:
        filename: Filename to check (no path, e.g. "Thing-1.0.pkg").

    Returns:
        Boolean
    """
    found_everywhere = True
    for repo in self._children:
        # Deliberately query every child, even after a miss
        # (matches the original non-short-circuiting behavior).
        if not repo.exists(filename):
            found_everywhere = False
    return found_everywhere
Report whether a file exists on all distribution points. Determines file type by extension. Args: filename: Filename you wish to check. (No path! e.g.: "AdobeFlashPlayer-14.0.0.176.pkg") Returns: Boolean
def do_uninstall(ctx, verbose, fake): aliases = cli.list_commands(ctx) aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall']) for alias in aliases: system_command = 'git config --global --unset-all alias.{0}'.format(alias) verbose_echo(system_command, ve...
Uninstalls legit git aliases, including deprecated legit sub-commands.
def cart2dir(self,cart): cart=numpy.array(cart) rad=old_div(numpy.pi,180.) if len(cart.shape)>1: Xs,Ys,Zs=cart[:,0],cart[:,1],cart[:,2] else: Xs,Ys,Zs=cart[0],cart[1],cart[2] Rs=numpy.sqrt(Xs**2+Ys**2+Zs**2) Decs=(old_div(numpy.arctan2(Ys,Xs),rad))...
converts a direction to cartesian coordinates
def on_switch_page(self, notebook, page_pointer, page_num, user_param1=None): page = notebook.get_nth_page(page_num) for tab_info in list(self.tabs.values()): if tab_info['page'] is page: state_m = tab_info['state_m'] sm_id = state_m.state.get_state_machine()....
Update state selection when the active tab was changed
def draw_heading(self, writer):
    """Conditionally redraw the heading when ``dirty`` is STATE_REFRESH.

    Moves the cursor home, clears the screen, and writes intro + header.

    :param writer: callable that writes unicode to the output stream.
    :returns: True when the heading was redrawn.
    """
    if self.dirty != self.STATE_REFRESH:
        return
    writer(u''.join((
        self.term.home,
        self.term.clear,
        self.screen.msg_intro,
        '\n',
        self.screen.header,
        '\n',
    )))
    return True
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH. When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved to (0,0), screen is cleared, and heading is displayed. :param writer: callable writes to output stream, receiving unicode. :returns: True if clas...
def include(gset, elem, value=True):
    """Do whatever it takes to make ``elem in gset`` true.

    Works for sets (using ``add``), lists (using ``append``) and
    dict-likes (using ``__setitem__`` with *value*). Returns *elem*.
    """
    inserter = getattr(gset, 'add', None)
    if inserter is None:
        inserter = getattr(gset, 'append', None)
    if inserter is None:
        if not hasattr(gset, '__setitem__'):
            raise Error("gset is not a supported container.")
        gset[elem] = value
    else:
        inserter(elem)
    return elem
Do whatever it takes to make ``elem in gset`` true. >>> L, S, D = [ ], set(), { } >>> include(L, "Lucy"); include(S, "Sky"); include(D, "Diamonds"); >>> print L, S, D ['Lucy'] set(['Sky']) {'Diamonds': True} Works for sets (using ``add``), lists (using ``append``) and dicts (using ...
def _visit_handlers(handlers, visitor, prefix, suffixes): results = [] for handler in handlers: for suffix in suffixes: func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None) if func: results.append(visitor(suffix, func)) return results
Use the visitor pattern to collect information from handlers
def receive(self, path, diff, showProgress=True): directory = os.path.dirname(path) cmd = ["btrfs", "receive", "-e", directory] if Store.skipDryRun(logger, self.dryrun)("Command: %s", cmd): return None if not os.path.exists(directory): os.makedirs(directory) ...
Return a context manager for stream that will store a diff.
def update_models(ctx, f=False):
    """Update local django db project models from the salic (MinC) database.

    :param f: when truthy, pass ``--force True`` to the management command.
    """
    command = 'create_models_from_sql --force True' if f else 'create_models_from_sql'
    manage(ctx, command, env={})
Updates local django db projects models using salic database from MinC
def shutdown(self, exitcode=0): logger.info("shutting down system stats and metadata service") self._system_stats.shutdown() self._meta.shutdown() if self._cloud: logger.info("stopping streaming files and file change observer") self._stop_file_observer() ...
Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor
def configuration_to_dict(handlers):
    """Return configuration data gathered by *handlers* as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()
    :rtype: dict
    """
    gathered = defaultdict(dict)
    for cfg_handler in handlers:
        for opt_name in cfg_handler.set_options:
            opt_value = _get_option(cfg_handler.target_obj, opt_name)
            gathered[cfg_handler.section_prefix][opt_name] = opt_value
    return gathered
Returns configuration data gathered by given handlers as a dict. :param list[ConfigHandler] handlers: Handlers list, usually from parse_configuration() :rtype: dict
def serialize(self, subject, *objects_or_combinators): ec_s = rdflib.BNode() if self.operator is not None: if subject is not None: yield subject, self.predicate, ec_s yield from oc(ec_s) yield from self._list.serialize(ec_s, self.operator, *objects_or_...
object_combinators may also be URIRefs or Literals
def plotDutyCycles(dutyCycle, filePath): _,entropy = binaryEntropy(dutyCycle) bins = np.linspace(0.0, 0.3, 200) plt.hist(dutyCycle, bins, alpha=0.5, label='All cols') plt.title("Histogram of duty cycles, entropy=" + str(float(entropy))) plt.xlabel("Duty cycle") plt.ylabel("Number of units") plt.savefig(fi...
Create plot showing histogram of duty cycles :param dutyCycle: (torch tensor) the duty cycle of each unit :param filePath: (str) Full filename of image file
def resizeColumnsToContents(self, startCol=None, stopCol=None): numCols = self.model().columnCount() startCol = 0 if startCol is None else max(startCol, 0) stopCol = numCols if stopCol is None else min(stopCol, numCols) row = 0 for col in range(startCol, stopCol): in...
Resizes all columns to the contents
def query_term(self, term, verbose=False): if term not in self: sys.stderr.write("Term %s not found!\n" % term) return rec = self[term] if verbose: print(rec) sys.stderr.write("all parents: {}\n".format( repr(rec.get_all_parents()))...
Given a GO ID, return GO object.
def new(self, path, desc=None, bare=True): if os.path.exists(path): raise RepoError('Path already exists: %s' % path) try: os.mkdir(path) if bare: Repo.init_bare(path) else: Repo.init(path) repo = Local(path) ...
Create a new bare repo.Local instance. :param path: Path to new repo. :param desc: Repo description. :param bare: Create as bare repo. :returns: New repo.Local instance.
def use_google_symbol(fct): def decorator(symbols): google_symbols = [] if isinstance(symbols, str): symbols = [symbols] symbols = sorted(symbols) for symbol in symbols: dot_pos = symbol.find('.') google_symbols.append( symbol[:dot_...
Removes ".PA" or other market indicator from yahoo symbol convention to suit google convention
def get_currencies_info() -> Element:
    """Get META information about currencies.

    url: http://www.cbr.ru/scripts/XML_val.asp

    :return: :class: `Element <Element 'Valuta'>` object
    :rtype: ElementTree.Element
    """
    info_url = const.CBRF_API_URLS['info']
    response = requests.get(info_url)
    return XML(response.text)
Get META information about currencies url: http://www.cbr.ru/scripts/XML_val.asp :return: :class: `Element <Element 'Valuta'>` object :rtype: ElementTree.Element
def _write(self, str_buf): self._filehandle.write(str_buf) self._buf_size += len(str_buf)
Uses the filehandle to the file in GCS to write to it.