code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def ratechangebase(self, ratefactor, current_base, new_base): if self._multiplier is None: self.log(logging.WARNING, "CurrencyHandler: changing base ourselves") if Decimal(1) != self.get_ratefactor(current_base, current_base): raise RuntimeError("CurrencyHandler: current baserate: %s not 1" % current_base) self._multiplier = Decimal(1) / self.get_ratefactor(current_base, new_base) return (ratefactor * self._multiplier).quantize(Decimal(".0001"))
Local helper function for changing currency base, returns new rate in new base Defaults to ROUND_HALF_EVEN
def _extract_peaks(specgram, neighborhood, threshold): kernel = np.ones(shape=neighborhood) local_averages = convolve(specgram, kernel / kernel.sum(), mode="constant", cval=0) floor = (1 + threshold) * local_averages candidates = np.where(specgram > floor, specgram, 0) local_maximums = grey_dilation(candidates, footprint=kernel) peak_coords = np.argwhere(specgram == local_maximums) peaks = zip(peak_coords[:, 0], peak_coords[:, 1]) return peaks
Partition the spectrogram into subcells and extract peaks from each cell if the peak is sufficiently energetic compared to the neighborhood.
def OPERATING_SYSTEM(stats, info): info.append(('architecture', platform.machine().lower())) info.append(('distribution', "%s;%s" % (platform.linux_distribution()[0:2]))) info.append(('system', "%s;%s" % (platform.system(), platform.release())))
General information about the operating system. This is a flag you can pass to `Stats.submit()`.
def coerce(self, other, is_positive=True): if hasattr(other, 'get_domain') and hasattr(other, 'lower') and hasattr(other, 'upper'): if self.is_domain_equal(other): return other else: msg = "Cannot merge partial orders with different domains!" raise CellConstructionFailure(msg) if isinstance(other, LinearOrderedCell): raise NotImplemented("Please Implement me!") domain = self.get_domain() if other in domain: c = self.__class__() if not is_positive: c.lower = set([other]) c.upper = set() else: c.upper = set([other]) c.lower = set() return c else: raise CellConstructionFailure("Could not coerce value that is"+ " outside order's domain . (Other = %s) " % (str(other),))
Only copies a pointer to the new domain's cell
def authenticate_xmpp(self): self.request_sid() self.log.debug('Prepare the XMPP authentication') sasl = SASLClient( host=self.to, service='xmpp', username=self.jid, password=self.password ) sasl.choose_mechanism(self.server_auth, allow_anonymous=False) challenge = self.get_challenge(sasl.mechanism) response = sasl.process(base64.b64decode(challenge)) resp_root = self.send_challenge_response(response) success = self.check_authenticate_success(resp_root) if success is None and\ resp_root.find('{{{0}}}challenge'.format(XMPP_SASL_NS)) is not None: resp_root = self.send_challenge_response('') return self.check_authenticate_success(resp_root) return success
Authenticate the user to the XMPP server via the BOSH connection.
def _install_exception_handler(self): def handler(t, value, traceback): if self.args.verbose: sys.__excepthook__(t, value, traceback) else: sys.stderr.write('%s\n' % unicode(value).encode('utf-8')) sys.excepthook = handler
Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
def _serialize( self, array_parent, value, state ): if not value: return for i, item_value in enumerate(value): state.push_location(self._item_processor.element_path, i) item_element = self._item_processor.serialize(item_value, state) array_parent.append(item_element) state.pop_location()
Serialize the array value and add it to the array parent element.
def _update_ssl_config(opts): if opts['ssl'] in (None, False): opts['ssl'] = None return if opts['ssl'] is True: opts['ssl'] = {} return import ssl for key, prefix in (('cert_reqs', 'CERT_'), ('ssl_version', 'PROTOCOL_')): val = opts['ssl'].get(key) if val is None: continue if not isinstance(val, six.string_types) or not val.startswith(prefix) or not hasattr(ssl, val): message = 'SSL option \'{0}\' must be set to one of the following values: \'{1}\'.' \ .format(key, '\', \''.join([val for val in dir(ssl) if val.startswith(prefix)])) log.error(message) raise salt.exceptions.SaltConfigurationError(message) opts['ssl'][key] = getattr(ssl, val)
Resolves string names to integer constant in ssl configuration.
def load(dbname): db = Database(dbname) tables = get_table_list(db.cur) chains = 0 for name in tables: db._traces[name] = Trace(name=name, db=db) db._traces[name]._shape = get_shape(db.cur, name) setattr(db, name, db._traces[name]) db.cur.execute('SELECT MAX(trace) FROM [%s]' % name) chains = max(chains, db.cur.fetchall()[0][0] + 1) db.chains = chains db.trace_names = chains * [tables, ] db._state_ = {} return db
Load an existing SQLite database. Return a Database instance.
def _read_opt_none(self, code, *, desc): _type = self._read_opt_type(code) _size = self._read_unpack(1) _data = self._read_fileng(_size) opt = dict( desc=_IPv6_Opts_NULL.get(code, desc), type=_type, length=_size + 2, data=_data, ) return opt
Read IPv6_Opts unassigned options. Structure of IPv6_Opts unassigned options [RFC 8200]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - - | Option Type | Opt Data Len | Option Data +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - - Octets Bits Name Description 0 0 ipv6_opts.opt.type Option Type 0 0 ipv6_opts.opt.type.value Option Number 0 0 ipv6_opts.opt.type.action Action (00-11) 0 2 ipv6_opts.opt.type.change Change Flag (0/1) 1 8 ipv6_opts.opt.length Length of Option Data 2 16 ipv6_opts.opt.data Option Data
def get_value(self, key): try: return self._dictionary[key] except KeyError: raise ConfigurationError("No such key - {}".format(key))
Get a value from the configuration.
def remove_this_opinion(self,opinion_id): for opi in self.get_opinions(): if opi.get_id() == opinion_id: self.node.remove(opi.get_node()) break
Removes the opinion for the given opinion identifier @type opinion_id: string @param opinion_id: the opinion identifier to be removed
def pretty_dict_str(d, indent=2): b = StringIO() write_pretty_dict_str(b, d, indent=indent) return b.getvalue()
shows JSON indented representation of d
def check_server(self): msg = 'API server not found. Please check your API url configuration.' try: response = self.session.head(self.domain) except Exception as e: raise_from(errors.ServerError(msg), e) try: self._check_response(response) except errors.NotFound as e: raise raise_from(errors.ServerError(msg), e)
Checks if the server is reachable and throws and exception if it isn't
def tabs_obsolete(physical_line): r indent = INDENT_REGEX.match(physical_line).group(1) if '\t' in indent: return indent.index('\t'), "W191 indentation contains tabs"
r"""For new projects, spaces-only are strongly recommended over tabs. Okay: if True:\n return W191: if True:\n\treturn
def init(self): if not self.call_delegate('will_start_debug_core', core=self): if self.halt_on_connect: self.halt() self._read_core_type() self._check_for_fpu() self.build_target_xml() self.sw_bp.init() self.call_delegate('did_start_debug_core', core=self)
Cortex M initialization. The bus must be accessible when this method is called.
def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'): epoch_info.on_epoch_begin() lr = epoch_info.optimizer.param_groups[-1]['lr'] print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr)) self.train_epoch(epoch_info, source) epoch_info.result_accumulator.freeze_results('train') self.validation_epoch(epoch_info, source) epoch_info.result_accumulator.freeze_results('val') epoch_info.on_epoch_end()
Run full epoch of learning
def _fetch_transfer_spec(self, node_action, token, bucket_name, paths): aspera_access_key, aspera_secret_key, ats_endpoint = self._get_aspera_metadata(bucket_name) _headers = {'accept': "application/json", 'Content-Type': "application/json"} credentials = {'type': 'token', 'token': {'delegated_refresh_token': token}} _url = ats_endpoint _headers['X-Aspera-Storage-Credentials'] = json.dumps(credentials) _data = {'transfer_requests': [ {'transfer_request': {'paths': paths, 'tags': {'aspera': { 'node': {'storage_credentials': credentials}}}}}]} _session = requests.Session() _response = _session.post(url=_url + "/files/" + node_action, auth=(aspera_access_key, aspera_secret_key), headers=_headers, json=_data, verify=self._config.verify_ssl) return _response
make hhtp call to Aspera to fetch back trasnfer spec
def create_jinja_environment(self) -> Environment: options = dict(self.jinja_options) if 'autoescape' not in options: options['autoescape'] = self.select_jinja_autoescape if 'auto_reload' not in options: options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD'] or self.debug jinja_env = self.jinja_environment(self, **options) jinja_env.globals.update({ 'config': self.config, 'g': g, 'get_flashed_messages': get_flashed_messages, 'request': request, 'session': session, 'url_for': url_for, }) jinja_env.filters['tojson'] = tojson_filter return jinja_env
Create and return the jinja environment. This will create the environment based on the :attr:`jinja_options` and configuration settings. The environment will include the Quart globals by default.
def get_workspace_disk_usage(workspace, summarize=False): command = ['du', '-h'] if summarize: command.append('-s') else: command.append('-a') command.append(workspace) disk_usage_info = subprocess.check_output(command).decode().split() filesize_pairs = list(zip(disk_usage_info[::2], disk_usage_info[1::2])) filesizes = [] for filesize_pair in filesize_pairs: size, name = filesize_pair filesizes.append({'name': name[len(workspace):], 'size': size}) return filesizes
Retrieve disk usage information of a workspace.
def fit(self, images, reference=None): images = check_images(images) reference = check_reference(images, reference) def func(item): key, image = item return asarray([key, self._get(image, reference)]) transformations = images.map(func, with_keys=True).toarray() if images.shape[0] == 1: transformations = [transformations] algorithm = self.__class__.__name__ return RegistrationModel(dict(transformations), algorithm=algorithm)
Estimate registration model using cross-correlation. Use cross correlation to compute displacements between images or volumes and reference. Displacements will be 2D for images and 3D for volumes. Parameters ---------- images : array-like or thunder images The sequence of images / volumes to register. reference : array-like A reference image to align to.
def github_request(self, path, callback, access_token=None, post_args=None, **kwargs): url = self._API_URL + path all_args = {} if access_token: all_args["access_token"] = access_token all_args.update(kwargs) if all_args: url += "?" + auth.urllib_parse.urlencode(all_args) callback = self.async_callback(self._on_github_request, callback) http = self._get_auth_http_client() if post_args is not None: http.fetch(url, method="POST", user_agent='Tinman/Tornado', body=auth.urllib_parse.urlencode(post_args), callback=callback) else: http.fetch(url, user_agent='Tinman/Tornado', callback=callback)
Make a request to the GitHub API, passing in the path, a callback, the access token, optional post arguments and keyword arguments to be added as values in the request body or URI
def compute_threat_list_diff( self, threat_type, constraints, version_token=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "compute_threat_list_diff" not in self._inner_api_calls: self._inner_api_calls[ "compute_threat_list_diff" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.compute_threat_list_diff, default_retry=self._method_configs["ComputeThreatListDiff"].retry, default_timeout=self._method_configs["ComputeThreatListDiff"].timeout, client_info=self._client_info, ) request = webrisk_pb2.ComputeThreatListDiffRequest( threat_type=threat_type, constraints=constraints, version_token=version_token, ) return self._inner_api_calls["compute_threat_list_diff"]( request, retry=retry, timeout=timeout, metadata=metadata )
Gets the most recent threat list diffs. Example: >>> from google.cloud import webrisk_v1beta1 >>> from google.cloud.webrisk_v1beta1 import enums >>> >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client() >>> >>> # TODO: Initialize `threat_type`: >>> threat_type = enums.ThreatType.THREAT_TYPE_UNSPECIFIED >>> >>> # TODO: Initialize `constraints`: >>> constraints = {} >>> >>> response = client.compute_threat_list_diff(threat_type, constraints) Args: threat_type (~google.cloud.webrisk_v1beta1.types.ThreatType): Required. The ThreatList to update. constraints (Union[dict, ~google.cloud.webrisk_v1beta1.types.Constraints]): The constraints associated with this request. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.webrisk_v1beta1.types.Constraints` version_token (bytes): The current version token of the client for the requested list (the client version that was received from the last successful diff). retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.webrisk_v1beta1.types.ComputeThreatListDiffResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def prange(*x): try: root = logging.getLogger() if len(root.handlers): for h in root.handlers: if (type(h) is logging.StreamHandler) and \ (h.level != logging.CRITICAL): from tqdm import tqdm return tqdm(range(*x)) return range(*x) else: from tqdm import tqdm return tqdm(range(*x)) except ImportError: return range(*x)
Progress bar range with `tqdm`
def _is_nanpa_number_with_national_prefix(self): return (self._current_metadata.country_code == 1 and self._national_number[0] == '1' and self._national_number[1] != '0' and self._national_number[1] != '1')
Returns true if the current country is a NANPA country and the national number begins with the national prefix.
def native_types(code): out = [] for c in code: if isconstant(c, quoted=True): if isstring(c, quoted=True): v = c[1:-1] elif isbool(c): v = to_bool(c) elif isnumber(c): v = c else: raise CompileError("Unknown type %s: %s" % (type(c).__name__, c)) out.append(make_embedded_push(v)) else: try: out.append(instructions.lookup(c)) except KeyError: raise CompileError("Unknown word '%s'" % c) return out
Convert code elements from strings to native Python types.
def add_sam2rnf_parser(subparsers, subcommand, help, description, simulator_name=None): parser_sam2rnf = subparsers.add_parser(subcommand, help=help, description=description) parser_sam2rnf.set_defaults(func=sam2rnf) parser_sam2rnf.add_argument( '-s', '--sam', type=str, metavar='file', dest='sam_fn', required=True, help='Input SAM/BAM with true (expected) alignments of the reads (- for standard input).' ) _add_shared_params(parser_sam2rnf, unmapped_switcher=True) parser_sam2rnf.add_argument( '-n', '--simulator-name', type=str, metavar='str', dest='simulator_name', default=simulator_name, help='Name of the simulator (for RNF).' if simulator_name is not None else argparse.SUPPRESS, )
Add another parser for a SAM2RNF-like command. Args: subparsers (subparsers): File name of the genome from which read tuples are created (FASTA file). simulator_name (str): Name of the simulator used in comments.
def move_dir( src_fs, src_path, dst_fs, dst_path, workers=0, ): def src(): return manage_fs(src_fs, writeable=False) def dst(): return manage_fs(dst_fs, create=True) with src() as _src_fs, dst() as _dst_fs: with _src_fs.lock(), _dst_fs.lock(): _dst_fs.makedir(dst_path, recreate=True) copy_dir(src_fs, src_path, dst_fs, dst_path, workers=workers) _src_fs.removetree(src_path)
Move a directory from one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (instance or URL). src_path (str): Path to a directory on ``src_fs`` dst_fs (FS or str): Destination filesystem (instance or URL). dst_path (str): Path to a directory on ``dst_fs``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy.
def read_first_available_value(filename, field_name): if not os.path.exists(filename): return None with open(filename, 'rb') as csvfile: reader = csv.DictReader(csvfile) for row in reader: value = row.get(field_name) if value: return value return None
Reads the first assigned value of the given field in the CSV table.
def start_aeidon(): extensions = ['ass', 'srt', 'ssa', 'sub'] Config.filenames = prep_files(Config.args, extensions) Config.patterns = pattern_logic_aeidon() for filename in Config.filenames: AeidonProject(filename)
Prepare filenames and patterns then process subtitles with aeidon.
def ColorWithLightness(self, lightness): h, s, l = self.__hsl return Color((h, s, lightness), 'hsl', self.__a, self.__wref)
Create a new instance based on this one with a new lightness value. Parameters: :lightness: The lightness of the new color [0...1]. Returns: A grapefruit.Color instance. >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25) (0.5, 0.25, 0.0, 1.0) >>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl (30, 1, 0.25)
def return_error(self, status, payload=None): resp = None if payload is not None: payload = json.dumps(payload) resp = self.make_response(payload, status=status) if status in [405]: abort(status) else: abort(status, response=resp)
Error handler called by request handlers when an error occurs and the request should be aborted. Usage:: def handle_post_request(self, *args, **kwargs): self.request_handler = self.get_request_handler() try: self.request_handler.process(self.get_data()) except SomeException as e: self.return_error(400, payload=self.request_handler.errors) return self.return_create_response()
def cmd(send, msg, args): if not msg: send("Google what?") return key = args['config']['api']['googleapikey'] cx = args['config']['api']['googlesearchid'] data = get('https://www.googleapis.com/customsearch/v1', params={'key': key, 'cx': cx, 'q': msg}).json() if 'items' not in data: send("Google didn't say much.") else: url = data['items'][0]['link'] send("Google says %s" % url)
Googles something. Syntax: {command} <term>
def get_input_vector(self, ind): dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getInputVector(b, ind) return np.array(b)
Given an index, get the corresponding vector of the Input Matrix.
def pool_to_HW(shape, data_frmt): if len(shape) != 4: return shape if data_frmt == 'NCHW': return [shape[2], shape[3]] return [shape[1], shape[2]]
Convert from NHWC|NCHW => HW
def visit_while(self, node): whiles = "while %s:\n%s" % (node.test.accept(self), self._stmt_list(node.body)) if node.orelse: whiles = "%s\nelse:\n%s" % (whiles, self._stmt_list(node.orelse)) return whiles
return an astroid.While node as string
def add_request_handler(self, request_handler): if request_handler is None: raise RuntimeConfigException( "Valid Request Handler instance to be provided") if not isinstance(request_handler, AbstractRequestHandler): raise RuntimeConfigException( "Input should be a RequestHandler instance") self.request_handler_chains.append(GenericRequestHandlerChain( request_handler=request_handler))
Register input to the request handlers list. :param request_handler: Request Handler instance to be registered. :type request_handler: AbstractRequestHandler :return: None
def run(self): try: chunk_index = self.chunk_index_gen(self.array.shape, self.iteration_order) for key in chunk_index: if self.masked: data = self.array[key].masked_array() else: data = self.array[key].ndarray() output_chunk = Chunk(key, data) self.output(output_chunk) except: self.abort() raise else: for queue in self.output_queues: queue.put(QUEUE_FINISHED)
Emit the Chunk instances which cover the underlying Array. The Array is divided into chunks with a size limit of MAX_CHUNK_SIZE which are emitted into all registered output queues.
def register(self, table): if table.table_type.is_system: raise ValueError('Cannot add system table to catalog') if not table.table_type.is_shared: raise ValueError('Cannot add local table to catalog') if table.is_substitute: raise ValueError('Cannot add substitute table to catalog') versions = self.__tables.get(table.name) if versions is None: versions = {} self.__tables[table.name] = versions versions[table.version] = table
Adds a shared table to the catalog. Args: table (SymbolTable): A non-system, shared symbol table.
def serial_adapters(self, serial_adapters): self._serial_adapters.clear() for _ in range(0, serial_adapters): self._serial_adapters.append(SerialAdapter(interfaces=4)) log.info('IOU "{name}" [{id}]: number of Serial adapters changed to {adapters}'.format(name=self._name, id=self._id, adapters=len(self._serial_adapters))) self._adapters = self._ethernet_adapters + self._serial_adapters
Sets the number of Serial adapters for this IOU VM. :param serial_adapters: number of adapters
def delete_collection(self, collection): uri = str.join('/', [self.uri, collection]) return self.service._delete(uri)
Deletes an existing collection. The collection being updated *is* expected to include the id.
def RetrieveAsset(logdir, plugin_name, asset_name): asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name) try: with tf.io.gfile.GFile(asset_path, "r") as f: return f.read() except tf.errors.NotFoundError: raise KeyError("Asset path %s not found" % asset_path) except tf.errors.OpError as e: raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
Retrieve a particular plugin asset from a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: The plugin we want an asset from. asset_name: The name of the requested asset. Returns: string contents of the plugin asset. Raises: KeyError: if the asset does not exist.
def extract_paths(self, paths, ignore_nopath): try: super().extract_paths( paths=paths, ignore_nopath=ignore_nopath, ) except ExtractPathError as err: LOGGER.debug( '%s: failed extracting files: %s', self.vm.name(), err.message ) if self._has_guestfs: self.extract_paths_dead(paths, ignore_nopath) else: raise
Extract the given paths from the domain Attempt to extract all files defined in ``paths`` with the method defined in :func:`~lago.plugins.vm.VMProviderPlugin.extract_paths`, if it fails, and `guestfs` is available it will try extracting the files with guestfs. Args: paths(list of tuples): files to extract in `[(src1, dst1), (src2, dst2)...]` format. ignore_nopath(boolean): if True will ignore none existing paths. Returns: None Raises: :exc:`~lago.plugins.vm.ExtractPathNoPathError`: if a none existing path was found on the VM, and `ignore_nopath` is False. :exc:`~lago.plugins.vm.ExtractPathError`: on all other failures.
def useThis(self, *args, **kwargs): self._callback = functools.partial(self._callback, *args, **kwargs)
Change parameter of the callback function. :param *args, **kwargs: parameter(s) to use when executing the callback function.
def show_diff(before_editing, after_editing): def listify(string): return [l+'\n' for l in string.rstrip('\n').split('\n')] unified_diff = difflib.unified_diff(listify(before_editing), listify(after_editing)) if sys.stdout.isatty(): buf = io.StringIO() for line in unified_diff: buf.write(text_type(line)) buf.seek(0) class opts: side_by_side = False width = 80 tab_width = 8 cdiff.markup_to_pager(cdiff.PatchStream(buf), opts) else: for line in unified_diff: click.echo(line.rstrip('\n'))
Shows a diff between two strings. If the output is to a tty the diff will be colored. Inputs are expected to be unicode strings.
def namespace_uri(self): try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None
Finds and returns first applied URI of this node that has a namespace. :return str: uri
def params(self): params, complex = self.get_params() url_params = self.default_params.copy() url_params.update(self.serialize_params(params, complex)) return url_params
URL parameters for wq.io.loaders.NetLoader
def load_ipython_extension(ipython): from google.cloud.bigquery.magics import _cell_magic ipython.register_magic_function( _cell_magic, magic_kind="cell", magic_name="bigquery" )
Called by IPython when this module is loaded as an IPython extension.
def _create_local_driver(self): driver_type = self.config.get('Driver', 'type') driver_name = driver_type.split('-')[0] if driver_name in ('android', 'ios', 'iphone'): driver = self._setup_appium() else: driver_setup = { 'firefox': self._setup_firefox, 'chrome': self._setup_chrome, 'safari': self._setup_safari, 'opera': self._setup_opera, 'iexplore': self._setup_explorer, 'edge': self._setup_edge, 'phantomjs': self._setup_phantomjs } driver_setup_method = driver_setup.get(driver_name) if not driver_setup_method: raise Exception('Unknown driver {0}'.format(driver_name)) capabilities = self._get_capabilities_from_driver_type(driver_name) self._add_capabilities_from_properties(capabilities, 'Capabilities') driver = driver_setup_method(capabilities) return driver
Create a driver in local machine :returns: a new local selenium driver
def fro(self, statement): if not self.name_format: return self.fail_safe_fro(statement) result = {} for attribute in statement.attribute: if attribute.name_format and self.name_format and \ attribute.name_format != self.name_format: continue try: (key, val) = self.ava_from(attribute) except (KeyError, AttributeError): pass else: result[key] = val return result
Get the attributes and the attribute values. :param statement: The AttributeStatement. :return: A dictionary containing attributes and values
def reset(self, clear=False): if self._executing: self._executing = False self._request_info['execute'] = {} self._reading = False self._highlighter.highlighting_on = False if clear: self._control.clear() if self._display_banner: if self.kernel_banner: self._append_plain_text(self.kernel_banner) self._append_plain_text(self.banner) self._show_interpreter_prompt()
Overridden to customize the order that the banners are printed
def xor(a, b): return bytearray(i ^ j for i, j in zip(a, b))
Bitwise xor on equal length bytearrays.
def add_column(filename,column,formula,force=False): columns = parse_formula(formula) logger.info("Running file: %s"%filename) logger.debug(" Reading columns: %s"%columns) data = fitsio.read(filename,columns=columns) logger.debug(' Evaluating formula: %s'%formula) col = eval(formula) col = np.asarray(col,dtype=[(column,col.dtype)]) insert_columns(filename,col,force=force) return True
Add a column to a FITS file. ADW: Could this be replaced by a ftool?
def check_buffer(coords, length, buffer): s = min(coords[0], buffer) e = min(length - coords[1], buffer) return [s, e]
check to see how much of the buffer is being used
def history_search_backward(self, e): u self.l_buffer=self._history.history_search_backward(self.l_buffer)
u'''Search backward through the history for the string of characters between the start of the current line and the point. This is a non-incremental search. By default, this command is unbound.
def calc_bin(self, _bin=None): if _bin is None: try: _bin = bins.bins(self.start, self.end, one=True) except TypeError: _bin = None return _bin
Calculate the smallest UCSC genomic bin that will contain this feature.
def get_endpoints_using_raw_json_emission(domain): uri = "http://{0}/data.json".format(domain) r = requests.get(uri) r.raise_for_status() return r.json()
Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the first of the two ways of getting this information, the raw JSON endpoint. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. Returns ------- Portal dataset metadata from the JSON endpoint.
def encode(cinfo, cio, image): argtypes = [ctypes.POINTER(CompressionInfoType), ctypes.POINTER(CioType), ctypes.POINTER(ImageType)] OPENJPEG.opj_encode.argtypes = argtypes OPENJPEG.opj_encode.restype = ctypes.c_int status = OPENJPEG.opj_encode(cinfo, cio, image) return status
Wrapper for openjpeg library function opj_encode. Encodes an image into a JPEG-2000 codestream. Parameters ---------- cinfo : compression handle cio : output buffer stream image : image to encode
def _joint_log_likelihood(self, X): check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') X_bin = self._transform_data(X) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X_bin.shape if n_features_X != n_features: raise ValueError( "Expected input with %d features, got %d instead" % (n_features, n_features_X)) jll = safe_sparse_dot(X_bin, self.feature_log_prob_.T) jll += self.class_log_prior_ return jll
Calculate the posterior log probability of the samples X
def rhsm_register(self, rhsm): login = rhsm.get('login') password = rhsm.get('password', os.environ.get('RHN_PW')) pool_id = rhsm.get('pool_id') self.run('rm /etc/pki/product/69.pem', ignore_error=True) custom_log = 'subscription-manager register --username %s --password *******' % login self.run( 'subscription-manager register --username %s --password "%s"' % ( login, password), success_status=(0, 64), custom_log=custom_log, retry=3) if pool_id: self.run('subscription-manager attach --pool %s' % pool_id) else: self.run('subscription-manager attach --auto') self.rhsm_active = True
Register the host on the RHSM. :param rhsm: a dict of parameters (login, password, pool_id)
def allowed_methods(self, path_info=None): try: self.match(path_info, method="--") except MethodNotAllowed as e: return e.valid_methods except HTTPException: pass return []
Returns the valid methods that match for a given path. .. versionadded:: 0.7
def _patch_expand_path(self, settings, name, value): if os.path.isabs(value): return os.path.normpath(value) value = os.path.expanduser(value) if not os.path.isabs(value) and self.projectdir: value = os.path.join(self.projectdir, value) return os.path.normpath(value)
Patch a path to expand home directory and make absolute path. Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to patch. Returns: str: Patched path to an absolute path.
def set_layout_settings(self, settings): size = settings.get('size') if size is not None: self.resize( QSize(*size) ) self.window_size = self.size() pos = settings.get('pos') if pos is not None: self.move( QPoint(*pos) ) hexstate = settings.get('hexstate') if hexstate is not None: self.restoreState( QByteArray().fromHex( str(hexstate).encode('utf-8')) ) if settings.get('is_maximized'): self.setWindowState(Qt.WindowMaximized) if settings.get('is_fullscreen'): self.setWindowState(Qt.WindowFullScreen) splitsettings = settings.get('splitsettings') if splitsettings is not None: self.editorwidget.editorsplitter.set_layout_settings(splitsettings)
Restore layout state
def add_entries_to_gallery(app, doctree, docname): if docname != 'gallery': return if not has_gallery(app.builder.name): return try: node = doctree.traverse(gallery)[0] except TypeError: return content = [] for entry in app.env.gallery_entries: raw_html_node = nodes.raw('', text=entry.html, format='html') content.append(raw_html_node) node.replace_self(content)
Add entries to the gallery node Should happen when all the doctrees have been read and the gallery entries have been collected. i.e at doctree-resolved time.
def open_python(self, message, namespace): from code import InteractiveConsole import readline import rlcompleter readline.set_completer(rlcompleter.Completer(namespace).complete) readline.parse_and_bind('tab: complete') console = InteractiveConsole(namespace) console.interact(message)
Open interactive python console
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo: path = Path(path).absolute() pe_info = pefile.PE(str(path)) try: for file_info in pe_info.FileInfo: if isinstance(file_info, list): result = _parse_file_info(file_info) if result: return result else: result = _parse_file_info(pe_info.FileInfo) if result: return result raise RuntimeError(f'unable to obtain version from {path}') except (KeyError, AttributeError) as exc: traceback.print_exc() raise RuntimeError(f'unable to obtain version from {path}') from exc
Get version info from executable Args: path: path to the executable Returns: VersionInfo
def append(self, function, update=True): self._funcs.append(function) self._add_dep(function) if update: self._update()
Append a new function to the end of this chain.
def encodeMsg(self, mesg): fmt = self.locs.get('log:fmt') if fmt == 'jsonl': s = json.dumps(mesg, sort_keys=True) + '\n' buf = s.encode() return buf elif fmt == 'mpk': buf = s_msgpack.en(mesg) return buf mesg = f'Unknown encoding format: {fmt}' raise s_exc.SynErr(mesg=mesg)
Get byts for a message
def load_embedding(path): EMBEDDING_DIM = 300 embedding_dict = {} with open(path, 'r', encoding='utf-8') as file: pairs = [line.strip('\r\n').split() for line in file.readlines()] for pair in pairs: if len(pair) == EMBEDDING_DIM + 1: embedding_dict[pair[0]] = [float(x) for x in pair[1:]] logger.debug('embedding_dict size: %d', len(embedding_dict)) return embedding_dict
return embedding for a specific file by given file path.
def _is_modified(self, filepath): if self._is_new(filepath): return False mtime = self._get_modified_time(filepath) return self._watched_files[filepath] < mtime
Returns True if the file has been modified since last seen. Will return False if the file has not been seen before.
def setup_logfile_raw(self, logfile, mode='w'): self.logfile_raw = open(logfile, mode=mode)
start logging raw bytes to the given logfile, without timestamps
def status(self): if self.report == None: return SentSms.ENROUTE else: return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED
Status of this SMS. Can be ENROUTE, DELIVERED or FAILED The actual status report object may be accessed via the 'report' attribute if status is 'DELIVERED' or 'FAILED'
def declare_param(self, id_, lineno, type_=None): if not self.check_is_undeclared(id_, lineno, classname='parameter', scope=self.current_scope, show_error=True): return None entry = self.declare(id_, lineno, symbols.PARAMDECL(id_, lineno, type_)) if entry is None: return entry.declared = True if entry.type_.implicit: warning_implicit_type(lineno, id_, type_) return entry
Declares a parameter Check if entry.declared is False. Otherwise raises an error.
def html_to_xhtml(html_unicode_string): try: assert isinstance(html_unicode_string, basestring) except AssertionError: raise TypeError root = BeautifulSoup(html_unicode_string, 'html.parser') try: assert root.html is not None except AssertionError: raise ValueError(''.join(['html_unicode_string cannot be a fragment.', 'string is the following: %s', unicode(root)])) root.html['xmlns'] = 'http://www.w3.org/1999/xhtml' unicode_string = unicode(root.prettify(encoding='utf-8', formatter='html'), encoding='utf-8') for tag in constants.SINGLETON_TAG_LIST: unicode_string = unicode_string.replace( '<' + tag + '/>', '<' + tag + ' />') return unicode_string
Converts html to xhtml Args: html_unicode_string: A (possible unicode) string representing HTML. Returns: A (possibly unicode) string representing XHTML. Raises: TypeError: Raised if input_string isn't a unicode string or string.
def layers(self, rev=True): image_layers = [ PodmanImage(None, identifier=x, pull_policy=PodmanImagePullPolicy.NEVER) for x in self.get_layer_ids() ] if not rev: image_layers.reverse() return image_layers
Get list of PodmanImage for every layer in image :param rev: get layers rev :return: list of :class:`conu.PodmanImage`
def copy(self): copyClass = self.copyClass if copyClass is None: copyClass = self.__class__ copied = copyClass() copied.copyData(self) return copied
Copy this object into a new object of the same type. The returned object will not have a parent object.
def tsiterator(ts, dateconverter=None, desc=None, clean=False, start_value=None, **kwargs): dateconverter = dateconverter or default_converter yield ['Date'] + ts.names() if clean == 'full': for dt, value in full_clean(ts, dateconverter, desc, start_value): yield (dt,) + tuple(value) else: if clean: ts = ts.clean() for dt, value in ts.items(desc=desc, start_value=start_value): dt = dateconverter(dt) yield (dt,) + tuple(value)
An iterator of timeseries as tuples.
def add_args(parser, positional=False): group = parser.add_argument_group("read loading") group.add_argument("reads" if positional else "--reads", nargs="+", default=[], help="Paths to bam files. Any number of paths may be specified.") group.add_argument( "--read-source-name", nargs="+", help="Names for each read source. The number of names specified " "must match the number of bam files. If not specified, filenames are " "used for names.") group = parser.add_argument_group( "read filtering", "A number of read filters are available. See the pysam " "documentation (http://pysam.readthedocs.org/en/latest/api.html) " "for details on what these fields mean. When multiple filter " "options are specified, reads must match *all* filters.") for (name, (kind, message, function)) in READ_FILTERS.items(): extra = {} if kind is bool: extra["action"] = "store_true" extra["default"] = None elif kind is int: extra["type"] = int extra["metavar"] = "N" elif kind is str: extra["metavar"] = "STRING" group.add_argument("--" + name.replace("_", "-"), help=message, **extra)
Extends a commandline argument parser with arguments for specifying read sources.
def enum(self): value = self._schema.get("enum", None) if value is None: return if not isinstance(value, list): raise SchemaError( "enum value {0!r} is not a list".format(value)) if len(value) == 0: raise SchemaError( "enum value {0!r} does not contain any" " elements".format(value)) seen = set() for item in value: if item in seen: raise SchemaError( "enum value {0!r} contains duplicate element" " {1!r}".format(value, item)) else: seen.add(item) return value
Enumeration of allowed object values. The enumeration must not contain duplicates.
def append_json( self, obj: Any, headers: Optional['MultiMapping[str]']=None ) -> Payload: if headers is None: headers = CIMultiDict() return self.append_payload(JsonPayload(obj, headers=headers))
Helper to append JSON part.
def select(self, *properties, **aliased_properties): if not (properties or aliased_properties): return self merged_properties = dict(zip(properties, properties)) merged_properties.update(aliased_properties) for prop_name in (merged_properties.keys()): if prop_name in self.selection: raise Exception('The property {} has already been selected'.format(prop_name)) new_selection = self.selection.copy() new_selection.update(merged_properties) return self._copy(selection=new_selection)
Specify which properties of the dataset must be returned Property extraction is based on `JMESPath <http://jmespath.org>`_ expressions. This method returns a new Dataset narrowed down by the given selection. :param properties: JMESPath to use for the property extraction. The JMESPath string will be used as a key in the output dictionary. :param aliased_properties: Same as properties, but the output dictionary will contain the parameter name instead of the JMESPath string.
def getEvoBibAsBibtex(*keys, **kw): res = [] for key in keys: bib = get_url( "http://bibliography.lingpy.org/raw.php?key=" + key, log=kw.get('log')).text try: res.append('@' + bib.split('@')[1].split('</pre>')[0]) except IndexError: res.append('@misc{' + key + ',\nNote={missing source}\n\n}') return '\n\n'.join(res)
Download bibtex format and parse it from EvoBib
def _charInfo(self, point, padding): print('{0:0>4X} '.format(point).rjust(padding), ud.name(chr(point), '<code point {0:0>4X}>'.format(point)))
Displays character info.
def get_recursive_subclasses(cls): return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_recursive_subclasses(s)]
Return list of all subclasses for a class, including subclasses of direct subclasses
def fit_transform(self, input, **fit_kwargs): self.fit(input, **fit_kwargs) X = self.transform(input) return X
Execute fit and transform in sequence.
def cybox_valueset_fact_handler(self, enrichment, fact, attr_info, add_fact_kargs): value_list = attr_info['value_set'][fact['node_id']].split(",") value_list = map(lambda x: x.strip(), value_list) add_fact_kargs['values'] = value_list return True
Handler for dealing with 'value_set' values. Unfortunately, CybOX et al. sometimes use comma-separated value lists rather than an XML structure that can contain several values. This handler is called for elements concerning a value-set such as the following example:: <URIObj:Value condition="IsInSet" value_set="www.sample1.com/index.html, sample2.com/login.html, dev.sample3.com/index/kb.html" datatype="AnyURI"/>
def update_wrapper(self, process_list): self.set_count(len(process_list)) if self.should_update(): return self.update(process_list) else: return self.result()
Wrapper for the children update
def index(obj, index=INDEX_NAME, doc_type=DOC_TYPE): doc = to_dict(obj) if doc is None: return id = doc.pop('id') return es_conn.index(index, doc_type, doc, id=id)
Index the given document. https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.index https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
def upload(self, filename, directory=None): filename = eval_path(filename) if directory is None: directory = self.downloads_directory res1 = self._req_upload(filename, directory) data1 = res1['data'] file_id = data1['file_id'] res2 = self._req_file(file_id) data2 = res2['data'][0] data2.update(**data1) return _instantiate_uploaded_file(self, data2)
Upload a file ``filename`` to ``directory`` :param str filename: path to the file to upload :param directory: destionation :class:`.Directory`, defaults to :attribute:`.API.downloads_directory` if None :return: the uploaded file :rtype: :class:`.File`
def modifyInPlace(self, *, sort=None, purge=False, done=None): self.data = self.modify(sort=sort, purge=purge, done=done)
Like Model.modify, but changes existing database instead of returning a new one.
def apply(coro, *args, **kw): assert_corofunction(coro=coro) @asyncio.coroutine def wrapper(*_args, **_kw): return (yield from coro(*args, **kw)) return wrapper
Creates a continuation coroutine function with some arguments already applied. Useful as a shorthand when combined with other control flow functions. Any arguments passed to the returned function are added to the arguments originally passed to apply. This is similar to `paco.partial()`. This function can be used as decorator. arguments: coro (coroutinefunction): coroutine function to wrap. *args (mixed): mixed variadic arguments for partial application. *kwargs (mixed): mixed variadic keyword arguments for partial application. Raises: TypeError: if coro argument is not a coroutine function. Returns: coroutinefunction: wrapped coroutine function. Usage:: async def hello(name, mark='!'): print('Hello, {name}{mark}'.format(name=name, mark=mark)) hello_mike = paco.apply(hello, 'Mike') await hello_mike() # => Hello, Mike! hello_mike = paco.apply(hello, 'Mike', mark='?') await hello_mike() # => Hello, Mike?
def document_create(index, doc_type, body=None, id=None, hosts=None, profile=None, source=None): es = _get_instance(hosts, profile) if source and body: message = 'Either body or source should be specified but not both.' raise SaltInvocationError(message) if source: body = __salt__['cp.get_file_str']( source, saltenv=__opts__.get('saltenv', 'base')) try: return es.index(index=index, doc_type=doc_type, body=body, id=id) except elasticsearch.TransportError as e: raise CommandExecutionError("Cannot create document in index {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
Create a document in a specified index index Index name where the document should reside doc_type Type of the document body Document to store source URL of file specifying document to store. Cannot be used in combination with ``body``. id Optional unique document identifier for specified doc_type (empty for random) CLI example:: salt myminion elasticsearch.document_create testindex doctype1 '{}'
def max_enrichment(fg_vals, bg_vals, minbg=2): scores = np.hstack((fg_vals, bg_vals)) idx = np.argsort(scores) x = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals)))) xsort = x[idx] l_fg = len(fg_vals) l_bg = len(bg_vals) m = 0 s = 0 for i in range(len(scores), 0, -1): bgcount = float(len(xsort[i:][xsort[i:] == 0])) if bgcount >= minbg: enr = (len(xsort[i:][xsort[i:] == 1]) / l_fg) / (bgcount / l_bg) if enr > m: m = enr s = scores[idx[i]] return m
Computes the maximum enrichment. Parameters ---------- fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. minbg : int, optional Minimum number of matches in background. The default is 2. Returns ------- enrichment : float Maximum enrichment.
def _resolve_image(ret, image, client_timeout): image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: if not __opts__['test']: try: pull_result = __salt__['docker.pull']( image, client_timeout=client_timeout, ) except Exception as exc: raise CommandExecutionError( 'Failed to pull {0}: {1}'.format(image, exc) ) else: ret['changes']['image'] = pull_result image_id = __salt__['docker.resolve_image_id'](image) if image_id is False: raise CommandExecutionError( 'Image \'{0}\' not present despite a docker pull ' 'raising no errors'.format(image) ) return image_id
Resolve the image ID and pull the image if necessary
def create(self, friendly_name=values.unset, domain_name=values.unset, disaster_recovery_url=values.unset, disaster_recovery_method=values.unset, recording=values.unset, secure=values.unset, cnam_lookup_enabled=values.unset): data = values.of({ 'FriendlyName': friendly_name, 'DomainName': domain_name, 'DisasterRecoveryUrl': disaster_recovery_url, 'DisasterRecoveryMethod': disaster_recovery_method, 'Recording': recording, 'Secure': secure, 'CnamLookupEnabled': cnam_lookup_enabled, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TrunkInstance(self._version, payload, )
Create a new TrunkInstance :param unicode friendly_name: A string to describe the resource :param unicode domain_name: The unique address you reserve on Twilio to which you route your SIP traffic :param unicode disaster_recovery_url: The HTTP URL that we should call if an error occurs while sending SIP traffic towards your configured Origination URL :param unicode disaster_recovery_method: The HTTP method we should use to call the disaster_recovery_url :param TrunkInstance.RecordingSetting recording: The recording settings for the trunk :param bool secure: Whether Secure Trunking is enabled for the trunk :param bool cnam_lookup_enabled: Whether Caller ID Name (CNAM) lookup should be enabled for the trunk :returns: Newly created TrunkInstance :rtype: twilio.rest.trunking.v1.trunk.TrunkInstance
def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False
Returns True if any of the installed versions match the specified version conditions, otherwise returns False. installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3)
def map_nested(function, data_struct, dict_only=False, map_tuple=False): if isinstance(data_struct, dict): return { k: map_nested(function, v, dict_only, map_tuple) for k, v in data_struct.items() } elif not dict_only: types = [list] if map_tuple: types.append(tuple) if isinstance(data_struct, tuple(types)): mapped = [map_nested(function, v, dict_only, map_tuple) for v in data_struct] if isinstance(data_struct, list): return mapped else: return tuple(mapped) return function(data_struct)
Apply a function recursively to each element of a nested data struct.
def GetAnalyzers(cls): for analyzer_name, analyzer_class in iter(cls._analyzer_classes.items()): yield analyzer_name, analyzer_class
Retrieves the registered analyzers. Yields: tuple: containing: str: the uniquely identifying name of the analyzer type: the analyzer class.
def initialise(): global settings, project_settings settings = Changes.load() project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token))
Detects, prompts and initialises the project. Stores project and tool configuration in the `changes` module.
def full_size(self): self.dragpos = wx.Point(0, 0) self.zoom = 1.0 self.need_redraw = True
show image at full size