code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def view_contents(token, dstore):
    """Return an rst table of datastore keys and sizes, plus the file's total size."""
    try:
        desc = dstore['oqparam'].description
    except KeyError:
        desc = ''
    rows = []
    for nbytes, key in sorted((dstore.getsize(k), k) for k in dstore):
        rows.append((key, humansize(nbytes)))
    total = '\n%s : %s' % (
        dstore.filename, humansize(os.path.getsize(dstore.filename)))
    return rst_table(rows, header=(desc, '')) + total
Returns the size of the contents of the datastore and its total size
def list_instance_profiles(path_prefix='/', region=None, key=None, keyid=None, profile=None):
    """List the names of all IAM instance profiles under ``path_prefix``.

    CLI Example: salt-call boto_iam.list_instance_profiles
    """
    found = get_all_instance_profiles(path_prefix, region, key, keyid, profile)
    return [entry['instance_profile_name'] for entry in found]
List all IAM instance profiles, starting at the optional path. .. versionadded:: 2016.11.0 CLI Example: salt-call boto_iam.list_instance_profiles
def wait_for_responses(self):
    """Block until the worker thread finishes or the response timeout elapses.

    Marks the receiver as stopped and returns whatever responses arrived,
    even when the timeout expired with no matching response.
    """
    # join() returns after COMMAND_RESPONSE_TIMEOUT_S even if the thread is
    # still alive, so this never blocks indefinitely.
    self.thread.join(self.COMMAND_RESPONSE_TIMEOUT_S)
    self.running = False
    return self.responses
Block the thread and wait for the response to the given request to arrive from the VI. If no matching response is received in COMMAND_RESPONSE_TIMEOUT_S seconds, returns anyway.
def _is_valid_string(self, inpt, metadata):
    """Check whether ``inpt`` is a string satisfying the metadata constraints.

    Constraints checked (each only when the metadata defines it): minimum
    length, maximum length, and membership in an allowed string set.
    """
    if not is_string(inpt):
        return False
    min_len = metadata.get_minimum_string_length()
    max_len = metadata.get_maximum_string_length()
    if min_len and len(inpt) < min_len:
        return False
    elif max_len and len(inpt) > max_len:
        return False
    allowed = metadata.get_string_set()
    if allowed and inpt not in allowed:
        return False
    else:
        return True
Checks if input is a valid string
def up_threshold(x, s, p):
    """Return True when x/s reaches cutoff p, or when a binomial test cannot
    distinguish the observed ratio from p (p-value > 0.01)."""
    ratio = 1.0 * x/s
    if ratio >= p:
        return True
    if stat.binom_test(x, s, p) > 0.01:
        return True
    return False
Decide whether the similarity ratio x/s reaches the cutoff p, treating ratios that a binomial test cannot distinguish from p (p-value > 0.01) as also passing.
def re_enqueue(self, item):
    """Re-enqueue ``item`` until it has been retried MAX_RETRIES times.

    The count is tracked in item['retries']; once the limit is reached the
    item is dropped with a warning.
    """
    if 'retries' not in item:
        # First failure: start the retry counter and put it back.
        item['retries'] = 1
        self._q.put_nowait(item)
        return
    retries = item['retries']
    if retries >= self.MAX_RETRIES:
        # Fixed: log.warn is deprecated (use warning) and the original
        # message contained a double space from string concatenation.
        log.warning("Failed to execute {} after {} retries, give it "
                    "up.".format(item['method'], retries))
    else:
        item['retries'] = retries + 1
        self._q.put_nowait(item)
Re-enqueue till reach max retries.
def renames(from_path, to_path, user=None):
    """Rename ``from_path`` to ``to_path``, creating parent directories first."""
    parent = path.dirname(to_path)
    if parent:
        mkdir(parent, user=user)
    rename(from_path, to_path, user=user)
Rename ``from_path`` to ``to_path``, creating parents as needed.
def prune_cached(values):
    """Return ``values`` minus the items whose hash is already in the cache.

    The cache lives at ~/.config/blockade/cache.txt, one hash per line; when
    it does not exist, ``values`` is returned unchanged.
    """
    import os
    config_path = os.path.expanduser('~/.config/blockade')
    file_path = os.path.join(config_path, 'cache.txt')
    if not os.path.isfile(file_path):
        return values
    # 'with' closes the handle deterministically (the original leaked it),
    # and a set makes each membership test O(1).
    with open(file_path, 'r') as handle:
        cached = {line.strip() for line in handle}
    return [item for item in values if hash_values(item) not in cached]
Remove the items that have already been cached.
def msquared(self, benchmark, rf=0.02, ddof=0):
    """M-squared: this portfolio's return scaled to the benchmark's total risk.

    ``rf`` may be a float annualized rate or a periodic return series; it is
    normalized through ``_validate_rf``.  ``ddof`` is forwarded to the
    standard-deviation computations.
    """
    rf = self._validate_rf(rf)
    leverage = benchmark.anlzd_stdev(ddof) / self.anlzd_stdev(ddof)
    excess = self.anlzd_ret() - rf
    return rf + excess * leverage
M-squared, return scaled by relative total risk. A measure of what a portfolio would have returned if it had taken on the same *total* risk as the market index. [Source: CFA Institute] Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. rf : {float, TSeries, pd.Series}, default 0.02 If float, this represents an *compounded annualized* risk-free rate; 2.0% is the default. If a TSeries or pd.Series, this represents a time series of periodic returns to a risk-free security. To download a risk-free rate return series using 3-month US T-bill yields, see:`pyfinance.datasets.load_rf`. ddof : int, default 0 Degrees of freedom, passed to pd.Series.std(). Returns ------- float
def build_timeout_circuit(tor_state, reactor, path, timeout, using_guards=False):
    """Build a Tor circuit over ``path``, cancelling the attempt after ``timeout`` seconds.

    Returns a Deferred that fires with the built circuit, or with a Failure
    wrapping CircuitBuildTimedOutError when the timeout elapses first.
    """
    # Holds the circuit object once Tor creates it, so the cancellation
    # path can close the half-built circuit again.
    timed_circuit = []
    d = tor_state.build_circuit(routers=path, using_guards=using_guards)
    def get_circuit(c):
        # Remember the circuit and pass it along the callback chain.
        timed_circuit.append(c)
        return c
    def trap_cancel(f):
        # Only a cancellation (from the callLater below) is converted into
        # CircuitBuildTimedOutError; other failures propagate unchanged.
        f.trap(defer.CancelledError)
        if timed_circuit:
            # A circuit exists but didn't finish building: close it first.
            d2 = timed_circuit[0].close()
        else:
            d2 = defer.succeed(None)
        d2.addCallback(lambda _: Failure(CircuitBuildTimedOutError("circuit build timed out")))
        return d2
    d.addCallback(get_circuit)
    d.addCallback(lambda circ: circ.when_built())
    d.addErrback(trap_cancel)
    # Cancel the whole chain when the timeout fires.
    reactor.callLater(timeout, d.cancel)
    return d
Build a new circuit within a timeout. CircuitBuildTimedOutError will be raised unless we receive a circuit build result (success or failure) within the `timeout` duration. :returns: a Deferred which fires when the circuit build succeeds (or fails to build).
def _set_combobox(self, attrname, vals, default=0): combobox = getattr(self.w, attrname) for val in vals: combobox.append_text(val) if default > len(vals): default = 0 val = vals[default] combobox.show_text(val) return val
Populate combobox with given list.
def destroy(self):
    """Drain the pool, disconnecting every pooled Client object."""
    while True:
        try:
            entry = self.__pool.popleft()
            if isinstance(entry, Client):
                entry.disconnect()
        except IndexError:
            # Pool exhausted.
            break
Disconnects all pooled client objects.
def shuffle(self, overwrite=False):
    """Write a line-shuffled copy of ``self.path`` and point ``self.path`` at it.

    With ``overwrite=True`` the original file is shuffled in place; otherwise
    a sibling file with a "_shuffled" suffix is created.
    """
    if overwrite:
        shuffled = self.path
    else:
        shuffled = FileAPI.add_ext_name(self.path, "_shuffled")
    # Context managers close the handles deterministically; the original
    # left both files open until garbage collection.
    with open(self.path) as source:
        lines = source.readlines()
    random.shuffle(lines)
    with open(shuffled, "w") as target:
        target.writelines(lines)
    self.path = shuffled
This method creates new shuffled file.
def get_templatetype(type_id, **kwargs):
    """Fetch a single TemplateType by ID, eagerly loading its typeattrs."""
    query = db.DBSession.query(TemplateType)
    query = query.filter(TemplateType.id == type_id)
    query = query.options(joinedload_all("typeattrs"))
    return query.one()
Get a specific resource type by ID.
def _unpack_episode_title(element: ET.Element):
    """Build an EpisodeTitle from a title element's text and xml:lang attribute."""
    lang = element.get(f'{XML}lang')
    return EpisodeTitle(title=element.text, lang=lang)
Unpack EpisodeTitle from title XML element.
def read_ma_array(self, infile, var_name):
    """Read ``var_name`` from a CDF file as a numpy masked array.

    Values equal to the variable's _FillValue attribute (when present) are
    masked; otherwise an unmasked masked-array wrapper is returned.
    """
    cdf = self.read_cdf(infile)
    variable = cdf.variables[var_name]
    data = variable[:]
    try:
        import numpy as np
    except Exception:
        raise ImportError("numpy is required to return masked arrays.")
    if hasattr(variable, "_FillValue"):
        return np.ma.masked_where(data == variable._FillValue, data)
    return np.ma.array(data)
Create a masked array based on cdf's FillValue
def _find_files(dirpath: str) -> 'Iterable[str]': for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True, followlinks=True): if os.path.basename(dirpath).startswith('.'): del dirnames[:] for filename in filenames: yield os.path.join(dirpath, filename)
Find files recursively. Returns a generator that yields paths in no particular order.
def date_to_jd(year, month, day):
    """Convert a calendar date to Julian Day.

    Handles both Julian (before 1582-10-15) and Gregorian calendars;
    algorithm from 'Practical Astronomy with your Calculator or
    Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011.

    >>> date_to_jd(1985, 2, 17.25)
    2446113.75
    """
    # January/February count as months 13/14 of the previous year.
    if month in (1, 2):
        yearp, monthp = year - 1, month + 12
    else:
        yearp, monthp = year, month
    # Dates before the Gregorian switchover get no century correction.
    if (year, month, day) < (1582, 10, 15):
        B = 0
    else:
        A = math.trunc(yearp / 100.)
        B = 2 - A + math.trunc(A / 4.)
    if yearp < 0:
        C = math.trunc((365.25 * yearp) - 0.75)
    else:
        C = math.trunc(365.25 * yearp)
    D = math.trunc(30.6001 * (monthp + 1))
    return B + C + D + day + 1720994.5
Convert a date to Julian Day. Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011. Parameters ---------- year : int Year as integer. Years preceding 1 A.D. should be 0 or negative. The year before 1 A.D. is 0, 10 B.C. is year -9. month : int Month as integer, Jan = 1, Feb. = 2, etc. day : float Day, may contain fractional part. Returns ------- jd : float Julian Day Examples -------- Convert 6 a.m., February 17, 1985 to Julian Day >>> date_to_jd(1985,2,17.25) 2446113.75
def lex_document(self, document):
    """Return a get_tokens_for_line function suited to the current buffer."""
    location = self.editor_buffer.location
    if not location:
        # No file attached: no highlighting.
        return SimpleLexer().lex_document(document)
    if self.editor_buffer.in_file_explorer_mode:
        lexer = PygmentsLexer(DirectoryListingLexer, sync_from_start=False)
    else:
        lexer = PygmentsLexer.from_filename(location, sync_from_start=False)
    return lexer.lex_document(document)
Call the lexer and return a get_tokens_for_line function.
def _assemble_regulate_amount(stmt):
    """Render an IncreaseAmount/DecreaseAmount statement as an English sentence."""
    obj_str = _assemble_agent_str(stmt.obj)
    if stmt.subj is None:
        # No regulator: describe production/degradation of the object alone.
        if isinstance(stmt, ist.IncreaseAmount):
            stmt_str = obj_str + ' is produced'
        elif isinstance(stmt, ist.DecreaseAmount):
            stmt_str = obj_str + ' is degraded'
    else:
        subj_str = _assemble_agent_str(stmt.subj)
        if isinstance(stmt, ist.IncreaseAmount):
            rel_str = ' increases the amount of '
        elif isinstance(stmt, ist.DecreaseAmount):
            rel_str = ' decreases the amount of '
        stmt_str = subj_str + rel_str + obj_str
    return _make_sentence(stmt_str)
Assemble RegulateAmount statements into text.
def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs):
    """Read selected HDF5 columns and split the frame for Ray readers.

    Returns the split DataFrames plus the frame length as the last element
    (used to build a default Index when no index_col is set).
    """
    frame = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)
    splits = _split_result_for_readers(0, num_splits, frame)
    return splits + [len(frame.index)]
Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index.
def move(self, auth, resource, destinationresource, options=None, defer=False):
    """Move ``resource`` from one parent client to ``destinationresource``.

    :param auth: <cik>
    :param resource: identified resource to be moved.
    :param destinationresource: client resource it is being moved to.
    :param options: RPC options; defaults to {"aliases": True}.  A None
        sentinel replaces the original mutable default argument.
    """
    if options is None:
        options = {"aliases": True}
    return self._call('move', auth,
                      [resource, destinationresource, options], defer)
Moves a resource from one parent client to another. Args: auth: <cik> resource: Identified resource to be moved. destinationresource: resource of client resource is being moved to.
def _len_lcs(x, y):
    """Length of the Longest Common Subsequence of sequences ``x`` and ``y``."""
    table = _lcs(x, y)
    return table[len(x), len(y)]
Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y
def error(self, id, errorCode, errorString):
    """Write a TWS error/status message to stderr, tagged by severity class.

    Checks are ordered: specific informational codes first, then numeric
    ranges, so e.g. 165 is INFO even though it falls in the ERROR range.
    """
    def emit(template):
        sys.stderr.write(template % (errorCode, errorString))
    if errorCode == 165:
        emit("TWS INFO - %s: %s\n")
    elif 501 <= errorCode < 600:
        emit("TWS CLIENT-ERROR - %s: %s\n")
    elif 100 <= errorCode < 1100:
        emit("TWS ERROR - %s: %s\n")
    elif 1100 <= errorCode < 2100:
        emit("TWS SYSTEM-ERROR - %s: %s\n")
    elif errorCode in (2104, 2106, 2108):
        emit("TWS INFO - %s: %s\n")
    elif 2100 <= errorCode <= 2110:
        emit("TWS WARNING - %s: %s\n")
    else:
        emit("TWS ERROR - %s: %s\n")
Error during communication with TWS
def html(self):
    """Return the HTML representation: preamble + body + postamble."""
    return ''.join([self.html_preamble, self._repr_html_(), self.html_post])
Gives an html representation of the assessment.
def pair_list(args):
    """List pair entities in a workspace, optionally scoped to one entity.

    entity_type 'pair' returns just that pair; 'participant' filters all
    pairs by participant; any other entity type is resolved via its 'pairs'
    attribute.  Without an entity, all pairs are listed.
    """
    if not (args.entity_type and args.entity):
        return __get_entities(args, "pair", page_size=2000)
    if args.entity_type == 'pair':
        return [args.entity.strip()]
    if args.entity_type == 'participant':
        entities = _entity_paginator(args.project, args.workspace, 'pair',
                                     page_size=2000)
        return [e['name'] for e in entities
                if e['attributes']['participant']['entityName'] == args.entity]
    r = fapi.get_entity(args.project, args.workspace, args.entity_type,
                        args.entity)
    fapi._check_response_code(r, 200)
    pairs = r.json()['attributes']["pairs"]['items']
    return [pair['entityName'] for pair in pairs]
List pairs within a container.
def rank(items, sequence=string.ascii_lowercase):
    """Rank ``items`` (a subset of ``sequence``) in colexicographic order.

    >>> [rank(i) for i in ('', 'a', 'b', 'ab', 'c')]
    [0, 1, 2, 3, 4]
    >>> rank('spam')
    299009
    """
    members = set(items)
    total = 0
    for position, symbol in enumerate(sequence):
        if symbol in members:
            total += 1 << position
    return total
Rank items from sequence in colexicographical order. >>> [rank(i) for i in ('', 'a', 'b', 'ab', 'c')] [0, 1, 2, 3, 4] >>> rank('spam') 299009
def _decode_image(fobj, session, filename):
    """Read and decode an image from a file object into a Numpy array.

    OpenCV is tried first (flags=3 forces a 3-channel result); when it
    cannot decode the buffer, TF's decode_image is used as a fallback and
    evaluated through ``session``.

    Args:
      fobj: File object to read from.
      session: TF session used to run the fallback decode op.
      filename: Name of the original image in the archive (log messages only).

    Returns:
      Numpy array with shape (height, width, channels).
    """
    buf = fobj.read()
    image = tfds.core.lazy_imports.cv2.imdecode(
        np.fromstring(buf, dtype=np.uint8), flags=3)
    if image is None:
        logging.warning(
            "Image %s could not be decoded by OpenCV, falling back to TF",
            filename)
        try:
            image = tf.image.decode_image(buf, channels=3)
            image = session.run(image)
        except tf.errors.InvalidArgumentError:
            # Unrecoverable: abort the generation run.
            logging.fatal("Image %s could not be decoded by Tensorflow",
                          filename)
    # 1-frame GIFs decode with a leading frame dimension; drop it.
    if len(image.shape) == 4:
        image = image.reshape(image.shape[1:])
    return image
Reads and decodes an image from a file object as a Numpy array. The SUN dataset contains images in several formats (despite the fact that all of them have .jpg extension). Some of them are: - BMP (RGB) - PNG (grayscale, RGBA, RGB interlaced) - JPEG (RGB) - GIF (1-frame RGB) Since TFDS assumes that all images have the same number of channels, we convert all of them to RGB. Args: fobj: File object to read from. session: TF session used to decode the images. filename: Filename of the original image in the archive. Returns: Numpy array with shape (height, width, channels).
def create_perm(self, using=None, *args, **kwargs):
    """Create the fake 'constance.config' content type and change permission.

    Allows "can change config" to be checked like a normal model permission
    even though config is not a real model.  Presumably wired to a
    post-migrate/syncdb signal (hence the *args/**kwargs) — confirm at the
    call site.  Databases not listed in settings.CONSTANCE_DBS (when that
    setting exists) are skipped.
    """
    from django.conf import settings
    from django.contrib.auth.models import Permission
    from django.contrib.contenttypes.models import ContentType
    constance_dbs = getattr(settings, 'CONSTANCE_DBS', None)
    if constance_dbs is not None and using not in constance_dbs:
        # This database is not managed by constance; nothing to create.
        return
    if ContentType._meta.installed and Permission._meta.installed:
        # get_or_create keeps repeated invocations idempotent.
        content_type, created = ContentType.objects.using(using).get_or_create(
            app_label='constance',
            model='config',
        )
        permission, created = Permission.objects.using(using).get_or_create(
            content_type=content_type,
            codename='change_config',
            defaults={'name': 'Can change config'})
Creates a fake content type and permission to be able to check for permissions
def _match(self, struct1, struct2, fu, s1_supercell=True, use_rms=False, break_on_match=False): ratio = fu if s1_supercell else 1/fu if len(struct1) * ratio >= len(struct2): return self._strict_match( struct1, struct2, fu, s1_supercell=s1_supercell, break_on_match=break_on_match, use_rms=use_rms) else: return self._strict_match( struct2, struct1, fu, s1_supercell=(not s1_supercell), break_on_match=break_on_match, use_rms=use_rms)
Matches one struct onto the other
def get_oldest_commit_date(self):
    """Return the datetime of the oldest commit touching this file, in self.tz_name."""
    return self.git.get_commit_date(self.get_oldest_commit(), self.tz_name)
Get datetime of oldest commit involving this file :returns: Datetime of oldest commit
def generate_method_deprecation_message(to_be_removed_in_version, old_method_name,
                                        method_name=None, module_name=None):
    """Build a warning message for calls to a deprecated method.

    :param to_be_removed_in_version: version in which the method disappears.
    :param old_method_name: deprecated method's name.
    :param method_name: replacement method (appended only together with
        ``module_name``).
    :param module_name: class/module holding the replacement.
    :return: full deprecation warning message.
    """
    parts = [
        "Call to deprecated function '{old_method_name}'. "
        "This method will be removed in version '{version}'".format(
            old_method_name=old_method_name,
            version=to_be_removed_in_version,
        )
    ]
    if method_name is not None and module_name is not None:
        parts.append(
            " Please use the '{method_name}' method on the '{module_name}' "
            "class moving forward.".format(
                method_name=method_name,
                module_name=module_name,
            )
        )
    return ''.join(parts)
Generate a message to be used when warning about the use of deprecated methods. :param to_be_removed_in_version: Version of this module the deprecated method will be removed in. :type to_be_removed_in_version: str :param old_method_name: Deprecated method name. :type old_method_name: str :param method_name: Method intended to replace the deprecated method indicated. This method's docstrings are included in the decorated method's docstring. :type method_name: str :param module_name: Name of the module containing the new method to use. :type module_name: str :return: Full deprecation warning message for the indicated method. :rtype: str
def _assign_values_to_unbound_vars(unbound_vars, unbound_var_values):
    """Map user-supplied values onto unbound vars, filling in defaults.

    Raises ValueError for unknown keys or for vars left without a value.
    """
    context = {}
    for key, value in six.iteritems(unbound_var_values):
        if key not in unbound_vars:
            raise ValueError('unexpected key: %s. Legal values are: %s'
                             % (key, list(six.iterkeys(unbound_vars))))
        context[unbound_vars[key]] = value
    missing = []
    for var in six.itervalues(unbound_vars):
        if var in context:
            continue
        if var.has_default():
            context[var] = var.default
        else:
            missing.append(var.key)
    if missing:
        raise ValueError('Unspecified keys: %s' % missing)
    return context
Assigns values to the vars and raises ValueError if one is missing.
def _load_config(configfile, section=None):
    """Load ``configfile`` and return it (or just the requested section).

    Returns None when the file does not exist.  A missing ``section`` is
    warned about and the whole config is returned instead.
    """
    if not os.path.exists(configfile):
        return None
    config = read_config(configfile)
    if section is not None:
        if section in config:
            return config._sections[section]
        bot.warning('%s not found in %s' %(section, configfile))
    return config
general function to load and return a configuration given a helper name. This function is used for both the user config and global help me config files.
def make_valid_string(self, string=''):
    """Map ``string`` to a sanitized internal name, registering it on first use.

    Raises IndexError when duplicates are disallowed and ``string`` has
    already been registered.
    """
    if self.is_valid_str(string):
        # Already acceptable as-is; optionally remember the identity mapping.
        if self.map_valid:
            self.val_map[string] = string
        return string
    if string in self.val_map and not self.allow_dups:
        raise IndexError("Value {} has already been given to the sanitizer".format(string))
    internal_name = super(_NameSanitizer, self).make_valid_string()
    self.val_map[string] = internal_name
    return internal_name
Return a sanitized internal name for the given value, registering it on first use; raises IndexError for duplicate values when duplicates are disallowed.
def unpack_types(types, args, argnames, major):
    """Parse string arguments into values according to the ``types`` list.

    Args:
      types: list of kattypes, one per expected argument; the last may be
        flagged ``_multiple`` to absorb any extra arguments.
      args: list of argument strings to unpack.
      argnames: names of the arguments (may be shorter than ``types``).
      major: KATCP major version, forwarded to each Parameter.

    Raises:
      FailReply: if more arguments are given than ``types`` allows.

    Returns:
      The unpacked values.  NOTE(review): this relies on Python 2 ``map``
      padding the shorter sequence with None so that missing trailing
      arguments are unpacked as None; Python 3 ``map`` truncates instead —
      confirm before porting.
    """
    if len(types) > 0:
        multiple = types[-1]._multiple
    else:
        multiple = False
    if len(types) < len(args) and not multiple:
        raise FailReply("Too many parameters given.")
    params = []
    for i, kattype in enumerate(types):
        name = ""
        if i < len(argnames):
            name = argnames[i]
        params.append(Parameter(i+1, name, kattype, major))
    # Extra arguments re-use the last seen type and name from the loop above.
    if len(args) > len(types) and multiple:
        for i in range(len(types), len(args)):
            params.append(Parameter(i+1, name, kattype, major))
    return map(lambda param, arg: param.unpack(arg), params, args)
Parse arguments according to types list. Parameters ---------- types : list of kattypes The types of the arguments (in order). args : list of strings The arguments to parse. argnames : list of strings The names of the arguments. major : integer Major version of KATCP to use when packing types
def get_match_details(self, match_id=None, **kwargs):
    """Return the details for a Dota 2 match as a parsed API response.

    :param match_id: (int, optional) match to fetch; may also be passed
        directly in ``kwargs``.
    """
    kwargs.setdefault('match_id', match_id)
    url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
Returns a dictionary containing the details for a Dota 2 match :param match_id: (int, optional) :return: dictionary of matches, see :doc:`responses </responses>`
def get_client_ip(self):
    """Return (and cache) the client IP taken from the SSH environment.

    Raises SSHEnvironmentError when neither SSH_CONNECTION nor SSH_CLIENT
    identifies the peer.
    """
    if self.client_ip:
        return self.client_ip
    try:
        client = os.environ.get('SSH_CONNECTION',
                                os.environ.get('SSH_CLIENT'))
        # First whitespace-separated field is the client address.
        self.client_ip = client.split()[0]
    # Narrowed from a bare 'except:': only missing/empty-variable failures
    # are translated; KeyboardInterrupt/SystemExit etc. now propagate.
    except (AttributeError, IndexError):
        raise SSHEnvironmentError('cannot identify the ssh client '
                                  'IP address')
    self.logdebug('client_ip: %s\n' % self.client_ip)
    return self.client_ip
Return the client IP from the environment.
def apply_child_computation(self, child_msg: Message) -> 'BaseComputation':
    """Run ``child_msg`` as a child computation and record it on this one."""
    child = self.generate_child_computation(child_msg)
    self.add_child_computation(child)
    return child
Apply the vm message ``child_msg`` as a child computation.
def check(self, check_url=None):
    """Poll ``check_url`` until the server responds or the timeout expires.

    :param str check_url: URL to probe; defaults to the instance's own
        check URL.  Returns the seconds elapsed before the first successful
        response; kills the process and raises LiveAndLetDieError on timeout.
    """
    if check_url is not None:
        self.check_url = self._normalize_check_url(check_url)
    started = datetime.now()
    waited = 0.0
    while True:
        try:
            requests.get(self.check_url, verify=False)
            break
        except requests.exceptions.ConnectionError:
            if waited > self.timeout:
                # Give up: stop the server process and report the failure.
                self._kill()
                raise LiveAndLetDieError(
                    '{0} server {1} didn\'t start in specified timeout {2} '
                    'seconds!\ncommand: {3}'.format(
                        self.__class__.__name__,
                        self.check_url,
                        self.timeout,
                        ' '.join(self.create_command())
                    )
                )
            time.sleep(1)
            waited = _get_total_seconds(datetime.now() - started)
    return _get_total_seconds(datetime.now() - started)
Checks whether a server is running. :param str check_url: URL where to check whether the server is running. Default is ``"http://{self.host}:{self.port}"``.
def to_ufo_components(self, ufo_glyph, layer):
    """Draw the layer's .glyphs components onto ``ufo_glyph``.

    Component anchors and per-component alignment/locked/smartComponentValues
    cannot be expressed on UFO components, so they are stashed in the glyph lib.
    """
    pen = ufo_glyph.getPointPen()
    for index, component in enumerate(layer.components):
        pen.addComponent(component.name, component.transform)
        if component.anchor:
            if COMPONENT_INFO_KEY not in ufo_glyph.lib:
                ufo_glyph.lib[COMPONENT_INFO_KEY] = []
            info = {"name": component.name, "index": index,
                    "anchor": component.anchor}
            ufo_glyph.lib[COMPONENT_INFO_KEY].append(info)
    for key in ["alignment", "locked", "smartComponentValues"]:
        values = [getattr(c, key) for c in layer.components]
        # Only store the list when at least one component has a value.
        if any(values):
            ufo_glyph.lib[_lib_key(key)] = values
Draw .glyphs components onto a pen, adding them to the parent glyph.
def lock(self, atime=30, ltime=5, identifier=None):
    """Context manager acquiring the namespace global lock.

    :param int atime: maximum seconds to spend acquiring the lock
    :param int ltime: maximum seconds to own the lock
    :param str identifier: unique holder id (generated when omitted)

    Raises LockError when the lock cannot be acquired.
    """
    holder = identifier if identifier is not None else nice_identifier()
    if self._acquire_lock(holder, atime, ltime) != holder:
        raise LockError("could not acquire lock")
    try:
        self._session_lock_identifier = holder
        yield self
    finally:
        # Always release, even if the managed block raised.
        self._release_lock(holder)
        self._session_lock_identifier = None
Context manager to acquire the namespace global lock. This is typically used for multi-step registry operations, such as a read-modify-write sequence:: with registry.lock() as session: d = session.get('dict', 'key') del d['traceback'] session.set('dict', 'key', d) Callers may provide their own `identifier`; if they do, they must ensure that it is reasonably unique (e.g., a UUID). Using a stored worker ID that is traceable back to the lock holder is a good practice. :param int atime: maximum time (in seconds) to acquire lock :param int ltime: maximum time (in seconds) to own lock :param str identifier: worker-unique identifier for the lock
def projection_to_raster_coords(self, lat, lon):
    """Convert projection (lat, lon) to raster pixel-center coordinates.

    Inverts the affine GDAL geotransform; see the GetGeoTransform docs.
    """
    homogeneous = np.array([1, lon, lat])
    forward = np.array([[1, 0, 0],
                        self.geotransform[0:3],
                        self.geotransform[3:6]])
    return np.inner(inv(forward), homogeneous)[1:]
Returns pixel centers. See documentation for the GDAL function GetGeoTransform for details.
def get_api_docs(routes):
    """Generate GitHub-Markdown API docs from APIHandler routes.

    :type routes: [(url, RequestHandler), ...]
    :rtype: str
    """
    sections = []
    for url, handler, methods in sorted(map(_get_tuple_from_route, routes),
                                        key=lambda a: a[0]):
        if issubclass(handler, APIHandler):
            sections.append(_get_route_doc(url, handler, methods))
    return (
        "**This documentation is automatically generated.**\n\n" +
        "**Output schemas only represent `data` and not the full output; " +
        "see output examples and the JSend specification.**\n" +
        "\n<br>\n<br>\n".join(sections)
    )
Generates GitHub Markdown formatted API documentation using provided schemas in RequestHandler methods and their docstrings. :type routes: [(url, RequestHandler), ...] :param routes: List of routes (this is ideally all possible routes of the app) :rtype: str :returns: generated GFM-formatted documentation
def create_contact(self, email=None, first_name=None, last_name=None, phone_number=None):
    """Assemble a contact dict for a payment request; None when nothing is set.

    Asymmetry kept from the original: a falsy email ('' as well as None) is
    skipped, while empty strings are kept for the other fields.
    """
    contact = {}
    if email:
        contact['email'] = email
    for field, value in (('first_name', first_name),
                         ('last_name', last_name),
                         ('phone_number', phone_number)):
        if value is not None:
            contact[field] = value
    return contact or None
Create a contact which is later passed to payment.
def error(self, correlation_id, error, message, *args, **kwargs):
    """Log a recoverable application error.

    :param correlation_id: optional transaction id for tracing the call chain.
    :param error: error object associated with this message.
    :param message: human-readable message; ``args``/``kwargs`` parameterize it.
    """
    # args/kwargs are forwarded as a tuple/dict, not re-unpacked.
    self._format_and_write(LogLevel.Error, correlation_id, error, message,
                           args, kwargs)
Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
def _check_db_exists(self, instance):
    """Return (exists, context) for the database targeted by ``instance``.

    The full database list is fetched once and memoized in
    ``self.existing_databases`` so the same config can be deployed to many
    servers and fail gracefully where the database is absent.

    Args:
      instance: integration instance configuration.

    Returns:
      tuple(bool, str): whether the database exists, and a "host - db"
      context string for logging.
    """
    dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY)
    context = "{} - {}".format(host, database)
    if self.existing_databases is None:
        cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE)
        try:
            self.existing_databases = {}
            cursor.execute(DATABASE_EXISTS_QUERY)
            for row in cursor:
                self.existing_databases[row.name] = True
        except Exception as e:
            self.log.error("Failed to check if database {} exists: {}".format(database, e))
            return False, context
        finally:
            # Always release the cursor, even when the query failed.
            self.close_cursor(cursor)
    return database in self.existing_databases, context
Check if the database we're targeting actually exists If not then we won't do any checks This allows the same config to be installed on many servers but fail gracefully
def _flip_lr(x):
    "Flip `x` horizontally."
    if isinstance(x, ImagePoints):
        # Point annotations flip by negating the x flow coordinate in place.
        x.flow.flow[...,0] *= -1
        return x
    # Images flip by reversing the last (width) axis.
    return tensor(np.ascontiguousarray(np.array(x)[...,::-1]))
Flip `x` horizontally.
def CopyAttributesFromSessionCompletion(self, session_completion):
    """Copy completion attributes from a matching SessionCompletion.

    Args:
      session_completion (SessionCompletion): session completion container.

    Raises:
      ValueError: if the session completion's identifier differs from ours.
    """
    if self.identifier != session_completion.identifier:
        raise ValueError('Session identifier mismatch.')
    self.aborted = session_completion.aborted
    self.completion_time = session_completion.timestamp
    # Counters are copied only when present, so existing values survive.
    if session_completion.analysis_reports_counter:
        self.analysis_reports_counter = (
            session_completion.analysis_reports_counter)
    if session_completion.event_labels_counter:
        self.event_labels_counter = session_completion.event_labels_counter
    if session_completion.parsers_counter:
        self.parsers_counter = session_completion.parsers_counter
Copies attributes from a session completion. Args: session_completion (SessionCompletion): session completion attribute container. Raises: ValueError: if the identifier of the session completion does not match that of the session.
def get(name, function=None):
    """Look up the setting ``name``, preferring a function-local override.

    When ``function`` carries local settings, those take precedence over the
    global value.
    """
    if function is not None:
        if hasattr(function, Settings.FUNCTION_SETTINGS_NAME):
            local = getattr(function, Settings.FUNCTION_SETTINGS_NAME)
            if name in local:
                return local[name]
    return Settings.__global_setting_values[name]
Get a setting. `name` should be the name of the setting to look for. If the optional argument `function` is passed, this will look for a value local to the function before retrieving the global value.
def identity(self):
    """Return this partition's information as a PartitionIdentity.

    Resolves the owning Dataset lazily through the SQLAlchemy session when
    it is not already attached, then merges the dataset's identity dict
    with this partition's own fields.
    """
    if self.dataset is None:
        # Detached from its dataset: look it up via the object's session.
        s = object_session(self)
        ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
    else:
        ds = self.dataset
    d = {
        'id': self.id,
        'vid': self.vid,
        'name': self.name,
        'vname': self.vname,
        'ref': self.ref,
        'space': self.space,
        'time': self.time,
        'table': self.table_name,
        'grain': self.grain,
        'variant': self.variant,
        'segment': self.segment,
        # 'db' is the default format when none is recorded.
        'format': self.format if self.format else 'db'
    }
    # Partition fields take precedence over dataset fields on key clashes.
    return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
Return this partition information as a PartitionId.
def list_registered_stateful_ops_without_inputs():
    """Return the names of registered stateful ops that expect no inputs.

    Used to pick the ops included in the state-graph that is subsequently
    fed into the apply-graphs.

    Returns:
      A set of op-name strings.
    """
    # Set comprehension instead of set([list-comprehension]): one pass,
    # no intermediate list.
    return {
        name
        for name, op in op_def_registry.get_registered_ops().items()
        if op.is_stateful and not op.input_arg
    }
Returns set of registered stateful ops that do not expect inputs. This list is used to identify the ops to be included in the state-graph and that are subsequently fed into the apply-graphs. Returns: A set of strings.
def upgradeUpload(self, file):
    """Upload a firmware image to the miner via /upgrade/upload.

    :param file: path of the firmware file to send.
    """
    # Close the handle deterministically; the original left it to the GC.
    with open(file, 'rb') as upfile:
        return self.__post_files('/upgrade/upload', files={'upfile': upfile})
Upgrade the firmware of the miner.
def write_config(self):
    """Render the provisioner's config template and write it to disk."""
    rendered = util.render_template(self._get_config_template(),
                                    config_options=self.config_options)
    util.write_file(self.config_file, rendered)
Writes the provisioner's config file to disk and returns None. :return: None
def add_field_with_label(self, key, label_description, field):
    """Add ``field`` to the dialog with a descriptive label, keyed by ``key``.

    The field is retrievable later via GenericDialog.get_field(key).

    Args:
        key (str): unique identifier for the field.
        label_description (str): text of the description label.
        field (Widget): the field widget instance.
    """
    self.inputs[key] = field
    label = Label(label_description)
    label.style['margin'] = '0px 5px'
    label.style['min-width'] = '30%'
    row = HBox()
    row.style.update({'justify-content':'space-between',
                      'overflow':'auto',
                      'padding':'3px'})
    row.append(label, key='lbl' + key)
    row.append(self.inputs[key], key=key)
    self.container.append(row, key=key)
Adds a field to the dialog together with a descriptive label and a unique identifier. Note: You can access to the fields content calling the function GenericDialog.get_field(key). Args: key (str): The unique identifier for the field. label_description (str): The string content of the description label. field (Widget): The instance of the field Widget. It can be for example a TextInput or maybe a custom widget.
def describe_api_integration(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None):
    """Return the integration for a method on an API Gateway resource.

    Returns {'integration': ...} on success, or {'error': ...} when the
    resource is missing or boto raises a ClientError.
    """
    try:
        found = describe_api_resource(restApiId, resourcePath, region=region,
                                      key=key, keyid=keyid, profile=profile).get('resource')
        if not found:
            return {'error': 'no such resource'}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        integration = conn.get_integration(restApiId=restApiId,
                                           resourceId=found['id'],
                                           httpMethod=httpMethod)
        return {'integration': _convert_datetime_str(integration)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Get an integration for a given method in a given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.describe_api_integration restApiId resourcePath httpMethod
def stop_tracing_process(self, pid):
    """Stop tracing mode for every thread of the process with global id ``pid``."""
    process = self.system.get_process(pid)
    for thread in process.iter_threads():
        self.__stop_tracing(thread)
Stop tracing mode for all threads in the given process. @type pid: int @param pid: Global ID of process to stop tracing.
def create(self, public_key, friendly_name=values.unset, account_sid=values.unset):
    """Create a new PublicKeyInstance credential.

    :param unicode public_key: URL-encoded representation of the public key
    :param unicode friendly_name: descriptive string for the resource
    :param unicode account_sid: subaccount to associate the credential with
    :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyInstance
    """
    payload = self._version.create(
        'POST',
        self._uri,
        data=values.of({
            'PublicKey': public_key,
            'FriendlyName': friendly_name,
            'AccountSid': account_sid,
        }),
    )
    return PublicKeyInstance(self._version, payload, )
Create a new PublicKeyInstance :param unicode public_key: A URL encoded representation of the public key :param unicode friendly_name: A string to describe the resource :param unicode account_sid: The Subaccount this Credential should be associated with. :returns: Newly created PublicKeyInstance :rtype: twilio.rest.accounts.v1.credential.public_key.PublicKeyInstance
def select_device_by_load(wproc=0.5, wmem=0.5):
    """Activate and return the cupy device with the lowest weighted load.

    Load is the weighted average of processor (``wproc``) and memory
    (``wmem``) utilisation.
    """
    ranked = device_by_load(wproc=wproc, wmem=wmem)
    best = ranked[0]
    cp.cuda.Device(best).use()
    return best
Set the current device for cupy as the device with the lowest weighted average of processor and memory load.
def render(self):
    "Re-render Jupyter cell for batch of images."
    clear_output()
    # Persist current state before drawing the next view.
    self.write_csv()
    if self.empty() and self._skipped>0:
        return display(f'No images to show :). {self._skipped} pairs were '
                       f'skipped since at least one of the images was deleted by the user.')
    elif self.empty():
        return display('No images to show :)')
    if self.batch_contains_deleted():
        # Skip batches that reference images deleted in an earlier pass.
        self.next_batch(None)
        self._skipped += 1
    else:
        display(self.make_horizontal_box(self.get_widgets(self._duplicates)))
        display(self.make_button_widget('Next Batch', handler=self.next_batch, style="primary"))
Re-render Jupyter cell for batch of images.
def concatenate(self, other):
    """Concatenate the dimensions of ``self`` and ``other`` into a new shape.

    If either shape is completely unknown, the result is unknown as well
    (information about the other shape is discarded).
    """
    other = as_shape(other)
    if self._dims is None or other.dims is None:
        return unknown_shape()
    return TensorShape(self._dims + other.dims)
Returns the concatenation of the dimension in `self` and `other`. *N.B.* If either `self` or `other` is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. Args: other: Another `TensorShape`. Returns: A `TensorShape` whose dimensions are the concatenation of the dimensions in `self` and `other`.
def splitlines(self, keepends=False):
    """Split on newline characters, keeping line boundaries when ``keepends``.

    Fixed: the original keepends path appended '\\n' to every chunk, so the
    final line gained a newline it never had, and input ending in '\\n'
    produced a spurious trailing '\\n' entry.
    """
    parts = self.split('\n')
    if not keepends:
        # A trailing '\n' yields a final empty chunk, which is not a line.
        return parts[:-1] if not parts[-1] else parts
    lines = [part + '\n' for part in parts[:-1]]
    if parts[-1]:
        # Input did not end with a newline: keep the unterminated tail.
        lines.append(parts[-1])
    return lines
Return a list of lines, split on newline characters, include line boundaries, if keepends is true.
def output_files(self):
    """Output files of this rule as paths relative to the buildroot.

    The outputs are simply enumerated in the rule definition's 'outs' param.
    """
    base = os.path.join(self.address.repo, self.address.path)
    return [os.path.join(base, out) for out in self.params['outs']]
Returns list of output files from this rule, relative to buildroot. In this case it's simple (for now) - the output files are enumerated in the rule definition.
def check_complicance(self):
    """Check compliance with the Media RSS Specification, version 1.5.1.

    See http://www.rssboard.org/media-rss

    Raises:
        AttributeError: when media_* attributes are set without any of
            the required container elements, or when a media_content
            lacks a url while no media_player is given.
        NotImplementedError: for the media_group check (not implemented).
    """
    # NOTE(review): the method name misspells "compliance"; kept as-is
    # because callers depend on it.
    # Any truthy media_* attribute requires at least one container element.
    if(any([ma for ma in vars(self)
            if ma.startswith('media_') and getattr(self, ma)])
            and not self.media_group
            and not self.media_content
            and not self.media_player
            and not self.media_peerLink
            and not self.media_location
            ):
        raise AttributeError(
            "Using media elements requires the specification of at least "
            "one of the following elements: 'media_group', "
            "'media_content', 'media_player', 'media_peerLink' or "
            "'media_location'.")
    if not self.media_player:
        if self.media_content:
            if isinstance(self.media_content, list):
                # The list comprehension yields a False per entry that has
                # no 'url'; `not all(...)` is therefore True exactly when
                # at least one media_content entry lacks a url.
                if not all([False for mc in self.media_content
                            if 'url' not in mc.element_attrs]):
                    raise AttributeError(
                        "MediaRSSItems require a media_player attribute "
                        "if a media_content has no url set.")
            else:
                # Single media_content: its url must be set.
                if not self.media_content.element_attrs['url']:
                    raise AttributeError(
                        "MediaRSSItems require a media_player attribute "
                        "if a media_content has no url set.")
            pass
        elif self.media_group:
            raise NotImplementedError(
                "MediaRSSItem: media_group check not implemented yet.")
Check compliance with Media RSS Specification, Version 1.5.1. see http://www.rssboard.org/media-rss Raises AttributeError on error.
def user_remove(user, host='localhost', **connection_args):
    '''
    Delete a MySQL user.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.user_remove frank localhost
    '''
    dbc = _connect(**connection_args)
    if dbc is None:
        # Could not connect; _connect is responsible for error reporting.
        return False
    cur = dbc.cursor()
    qry = 'DROP USER %(user)s@%(host)s'
    args = {}
    args['user'] = user
    args['host'] = host
    try:
        _execute(cur, qry, args)
    except MySQLdb.OperationalError as exc:
        # Record the error for callers and fail the operation.
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
        return False
    # Verify the drop actually took effect before reporting success.
    if not user_exists(user, host, **connection_args):
        log.info('User \'%s\'@\'%s\' has been removed', user, host)
        return True
    log.info('User \'%s\'@\'%s\' has NOT been removed', user, host)
    return False
Delete MySQL user CLI Example: .. code-block:: bash salt '*' mysql.user_remove frank localhost
def time_to_seconds(x):
    """Convert *x* to a number of seconds.

    datetime.time values are converted exactly (including microseconds);
    strings are passed through unchanged; truthy numbers are clamped to
    [0, 86400); falsy values are returned as-is.
    """
    if isinstance(x, time):
        whole = (x.hour * 60 + x.minute) * 60 + x.second
        return (whole * 10**6 + x.microsecond) / 10**6
    if is_str(x):
        return x
    if not x:
        return x
    return max(0, min(x, 24 * 3600 - 10**-6))
Convert a time value to a number of seconds: datetime.time values are converted exactly, strings are passed through unchanged, and numbers are clamped to the interval [0, 86400).
def process_prefix(self, prefix: str) -> Union[Namespace, None]:
    """Resolve *prefix* to a Namespace, registering it if commonly known.

    The graph's own namespaces are consulted first; failing that, the
    local common_namespaces table is checked and a match is registered,
    so frequently used qnames work without an explicit add.

    Args:
        prefix: prefix to resolve.

    Returns:
        The Namespace bound to the prefix, or None when it is neither
        already registered nor commonly known.
    """
    known = self.namespaces.get(prefix)
    if known:
        return known
    iri: str = common_namespaces.get(prefix)
    if iri:
        return self.add_namespace(prefix, iri)
Add namespace to graph if it has a local match This allows qnames to be used without adding their respected namespaces if they are in the common_namespaces local dict. This is is to save a butt-ton of time trying to see what the ontology has as far as uris go. Args: prefix: prefix of the uri in the rdflib namespace to be checked if it exists in the local dict of common_namespaces. Returns: Namespace of uri if add or already exists; else None
def done(self):
    """Return True if the call was successfully cancelled or finished running.

    Also flushes the execution queue so all awaiting messages are
    received before the ended-state is evaluated.
    """
    try:
        # Take this future out of the local execution queue and send it
        # back over the queue's socket.
        scoop._control.execQueue.remove(self)
        scoop._control.execQueue.socket.sendFuture(self)
    except ValueError as e:
        # The future was not in the queue; nothing to hand back.
        pass
    # Process any pending queue messages before checking completion.
    scoop._control.execQueue.updateQueue()
    return self._ended()
Returns True if the call was successfully cancelled or finished running, False otherwise. This function updates the executionQueue so it receives all the awaiting message.
def add_personalization(self, personalization, index=0):
    """Insert a Personalization object at the given position.

    :param personalization: the Personalization object to add
    :type personalization: Personalization
    :param index: position at which to insert it
    :type index: int
    """
    updated = self._ensure_append(personalization, self._personalizations, index)
    self._personalizations = updated
Add a Personalization object :param personalization: the Personalization object to add :type personalization: Personalization :param index: The index where to add the Personalization :type index: int
def generate_report(book_url):
    """Generate the HTML content of a portfolio report.

    Renders one stock row per stock found in the book, then embeds the
    rows (and the other local variables) into the outer template.

    :param book_url: URL/path of the piecash book to read (read-only)
    :return: rendered HTML string
    """
    shares_no = None
    avg_price = None
    stock_template = templates.load_jinja_template("stock_template.html")
    stock_rows = ""
    with piecash.open_book(book_url, readonly=True, open_if_lock=True) as book:
        all_stocks = portfoliovalue.get_all_stocks(book)
        for stock in all_stocks:
            # Bug fix: `.date` was missing its call parentheses, so the
            # bound method -- not today's date -- was passed downstream.
            for_date = datetime.today().date()
            model = portfoliovalue.get_stock_model_from(book, stock, for_date)
            stock_rows += stock_template.render(model)
    template = templates.load_jinja_template("template.html")
    result = template.render(**locals())
    return result
Generates an HTML report content.
def _all(self, *args, **kwargs):
    """Return the full summary of the particular system.

    Aggregates the software, system, services, configuration and
    payload sections into one dict. Sections that cannot be queried
    degrade to 'N/A' instead of failing the whole summary.
    """
    data = dict()
    data['software'] = self._software(**kwargs)
    data['system'] = self._system(**kwargs)
    data['services'] = self._services(**kwargs)
    try:
        data['configuration'] = self._configuration(**kwargs)
    except InspectorQueryException as ex:
        # Configuration is best-effort: log and keep the rest usable.
        data['configuration'] = 'N/A'
        log.error(ex)
    data['payload'] = self._payload(**kwargs) or 'N/A'
    return data
Return all the summary of the particular system.
def build_from_yamlstr(cls, yamlstr):
    """Build a list of components from a yaml string.

    The top-level 'coordsys' entry applies to every other entry; each
    remaining key/value pair is expanded through build_from_energy_dict.

    :param yamlstr: yaml document with a top-level 'coordsys' key plus
        one dict per component
    :return: list of components, built in sorted key order
    """
    top_dict = yaml.safe_load(yamlstr)
    # pop() removes 'coordsys' from top_dict, so the loop below can
    # never see it again; the original's re-check for the key and the
    # redundant re-fetch of e_dict were dead code.
    coordsys = top_dict.pop('coordsys')
    output_list = []
    for e_key, e_dict in sorted(top_dict.items()):
        e_dict['coordsys'] = coordsys
        output_list += cls.build_from_energy_dict(e_key, e_dict)
    return output_list
Build a list of components from a yaml string
def beam_splitter(ax, p0, size=2.54, alpha=0, format=None, **kwds):
    r"""Draw a beam splitter.

    Plots a square of side *size* with one diagonal, rotated by *alpha*
    and translated to *p0*. Extra keyword arguments are forwarded to
    ``ax.plot``.
    """
    if format is None:
        format = 'k-'
    half = size/2
    xs = [half, -half, -half, half, half, -half]
    ys = [half, half, -half, -half, half, -half]
    for xi, yi in rotate_and_traslate([(xs, ys)], alpha, p0):
        ax.plot(xi, yi, format, **kwds)
r"""Draw a beam splitter.
def time_stats(self, **kwargs):
    """Get time stats for the object.

    A cached 'time_stats' attribute is returned directly; otherwise the
    stats are fetched from the server.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabTimeTrackingError: If the time tracking update cannot be done
    """
    try:
        return self.attributes['time_stats']
    except KeyError:
        pass
    path = '%s/%s/time_stats' % (self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(path, **kwargs)
Get time stats for the object. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTimeTrackingError: If the time tracking update cannot be done
def to_dict(self):
    """Serialize the date-range as a dict with 'start' and 'end' keys."""
    return {
        'start': date_to_str(self.start),
        'end': date_to_str(self.end),
    }
Transform the date-range to a dict.
def from_file(cls, fp, is_outlook=False):
    """Init a new object from a file path.

    Args:
        fp (string): file path of raw email
        is_outlook (boolean): if True, *fp* is a temporary converted
            Outlook email and is deleted after parsing

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from file {!r}".format(fp))
    with ported_open(fp) as handle:
        message = email.message_from_file(handle)
    if is_outlook:
        # The Outlook conversion produced a temporary file; clean it up.
        log.debug("Removing temp converted Outlook email {!r}".format(fp))
        os.remove(fp)
    return cls(message)
Init a new object from a file path. Args: fp (string): file path of raw email is_outlook (boolean): if True is an Outlook email Returns: Instance of MailParser
def query(self, query, max_results=None, timeout=0, dry_run=False, use_legacy_sql=None, external_udf_uris=None):
    """Submit a query to BigQuery.

    :param query: BigQuery query string
    :param max_results: maximum rows per page of results
    :param timeout: seconds to wait for completion before timing out
    :param dry_run: if True, validate the query without running it
    :param use_legacy_sql: if False, use standard SQL; None keeps the
        server default
    :param external_udf_uris: optional list of external UDF URIs
    :return: whatever _submit_query_job returns (job id, results)
    """
    logger.debug('Executing query: %s' % query)
    job = {
        'query': query,
        'timeoutMs': timeout * 1000,
        'dryRun': dry_run,
        'maxResults': max_results,
    }
    if use_legacy_sql is not None:
        job['useLegacySql'] = use_legacy_sql
    if external_udf_uris:
        job['userDefinedFunctionResources'] = [
            {'resourceUri': uri} for uri in external_udf_uris
        ]
    return self._submit_query_job(job)
Submit a query to BigQuery. Parameters ---------- query : str BigQuery query string max_results : int, optional The maximum number of rows to return per page of results. timeout : float, optional How long to wait for the query to complete, in seconds before the request times out and returns. dry_run : bool, optional If True, the query isn't actually run. A valid query will return an empty response, while an invalid one will return the same error message it would if it wasn't a dry run. use_legacy_sql : bool, optional. Default True. If False, the query will use BigQuery's standard SQL (https://cloud.google.com/bigquery/sql-reference/) external_udf_uris : list, optional Contains external UDF URIs. If given, URIs must be Google Cloud Storage and have .js extensions. Returns ------- tuple (job id, query results) if the query completed. If dry_run is True, job id will be None and results will be empty if the query is valid or a ``dict`` containing the response if invalid. Raises ------ BigQueryTimeoutException on timeout
def from_xml(self, doc):
    """Load this domain based on an XML document.

    :param doc: filename or file-like object containing the XML dump
    :return: the DomainDumpParser handler used for parsing
    """
    import xml.sax
    parser = DomainDumpParser(self)
    xml.sax.parse(doc, parser)
    return parser
Load this domain based on an XML document
def download(self):
    """Fetch the page at self.url and parse its HTML into self.tree."""
    response = requests.get(self.url)
    self.page = response
    self.tree = html.fromstring(response.text)
Downloads HTML from url.
def notimplemented(func):
    """Decorator: replace *func* with a stub raising NotImplementedError.

    The raised message reports the wrapped function's name, file and
    first line number.
    """
    def newfunc(*args, **kwargs):
        # Bug fix: func.func_code is Python 2 only; __code__ is the
        # Python 3 (and 2.6+) spelling, and this file already uses
        # Python-3-only features such as f-strings.
        co = func.__code__
        attrs = (co.co_name, co.co_filename, co.co_firstlineno)
        raise NotImplementedError("function %s at %s:%d is not implemented" % attrs)
    return update_func_meta(newfunc, func)
Raises a NotImplementedError if the function is called.
def _read_routes_c_v1():
    """Retrieve Windows routes through a GetIpForwardTable call.

    This is compatible with XP but won't get IPv6 routes.

    Returns:
        list of (dest, netmask, nexthop, iface, ip, metric) tuples.
    """
    def _extract_ip(obj):
        # The API returns addresses as little-endian 32-bit integers.
        return inet_ntop(socket.AF_INET, struct.pack("<I", obj))
    routes = []
    for route in GetIpForwardTable():
        ifIndex = route['ForwardIfIndex']
        dest = route['ForwardDest']
        netmask = route['ForwardMask']
        nexthop = _extract_ip(route['ForwardNextHop'])
        metric = route['ForwardMetric1']
        try:
            iface = dev_from_index(ifIndex)
            if iface.ip == "0.0.0.0":
                # Interface has no usable IPv4 address: skip this route.
                continue
        except ValueError:
            # No known interface for this index: skip this route.
            continue
        ip = iface.ip
        # Combine the route metric with the interface's own IPv4 metric.
        metric = metric + iface.ipv4_metric
        routes.append((dest, netmask, nexthop, iface, ip, metric))
    return routes
Retrieve Windows routes through a GetIpForwardTable call. This is compatible with XP but won't get IPv6 routes.
def zincr(self, name, key, amount=1):
    """Increase the score of ``key`` in zset ``name`` by ``amount``.

    A missing key is initialized to ``amount`` (SSDB **zincr**).

    :param string name: the zset name
    :param string key: the key name
    :param int amount: increment to apply
    :return: the new integer score of ``key`` in zset ``name``
    :rtype: int
    """
    return self.execute_command('zincr', name, key, get_integer('amount', amount))
Increase the score of ``key`` in zset ``name`` by ``amount``. If no key exists, the value will be initialized as ``amount`` Like **Redis.ZINCR** :param string name: the zset name :param string key: the key name :param int amount: increments :return: the integer value of ``key`` in zset ``name`` :rtype: int >>> ssdb.zincr('zset_2', 'key1', 7) 49 >>> ssdb.zincr('zset_2', 'key2', 3) 317 >>> ssdb.zincr('zset_2', 'key_not_exists', 101) 101 >>> ssdb.zincr('zset_not_exists', 'key_not_exists', 8848) 8848
def get_table(self, name='Meta', h5loc='/meta'):
    """Convert the collected metadata to a KM3Pipe Table.

    Returns None if no metadata is present. Every column's dtype is a
    fixed-size string sized to the longest entry, since variable-length
    strings do not fit the current scheme.
    """
    if not self.meta:
        return None
    columns = defaultdict(list)
    for entry in self.meta:
        for field, value in entry.items():
            columns[field].append(value)
    dtypes = [(field, 'S{}'.format(max(map(len, values))))
              for field, values in columns.items()]
    return Table(
        columns, dtype=dtypes, h5loc=h5loc, name='Meta', h5singleton=True
    )
Convert metadata to a KM3Pipe Table. Returns `None` if there is no data. Each column's dtype will be set to a fixed size string (numpy.string_) with the length of the longest entry, since writing variable length strings does not fit the current scheme.
def create_snmp_manager(self, manager, host, **kwargs):
    r"""Create an SNMP manager.

    :param manager: Name of manager to be created.
    :type manager: str
    :param host: IP address or DNS name of SNMP server to be used.
    :type host: str
    :param \*\*kwargs: Additional request parameters; see the REST API
        Guide on your array for **POST snmp/:manager**.
    :returns: A dictionary describing the created SNMP manager.
    :rtype: ResponseDict
    """
    # kwargs intentionally take precedence over the host default.
    body = {"host": host, **kwargs}
    return self._request("POST", "snmp/{0}".format(manager), body)
Create an SNMP manager. :param manager: Name of manager to be created. :type manager: str :param host: IP address or DNS name of SNMP server to be used. :type host: str :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **POST snmp/:manager** :type \*\*kwargs: optional :returns: A dictionary describing the created SNMP manager. :rtype: ResponseDict
def GetGroups(r, bulk=False):
    """Gets all node groups in the cluster.

    @type bulk: bool
    @param bulk: whether to return all information about the groups
    @rtype: list of dict or str
    @return: with bulk, dictionaries with info about all node groups in
        the cluster; otherwise just the names of those node groups
    """
    if bulk:
        return r.request("get", "/2/groups", query={"bulk": 1})
    groups = r.request("get", "/2/groups")
    return r.applier(itemgetters("name"), groups)
Gets all node groups in the cluster. @type bulk: bool @param bulk: whether to return all information about the groups @rtype: list of dict or str @return: if bulk is true, a list of dictionaries with info about all node groups in the cluster, else a list of names of those node groups
def fetch_identifier_component(self, endpoint_name, identifier_data, query_params=None):
    """Normalize parameters and delegate the fetch to the api client.

    :param endpoint_name: name of the API endpoint to hit
    :param identifier_data: raw identifier data, converted through
        get_identifier_input before the call
    :param query_params: optional dict of query parameters (defaults to {})
    """
    params = {} if query_params is None else query_params
    identifier_input = self.get_identifier_input(identifier_data)
    return self._api_client.fetch(endpoint_name, identifier_input, params)
Common method for handling parameters before passing to api_client
def timethis(func):
    """Decorator that prints how long each call of *func* takes.

    The report line includes the wrapped function's module and name,
    captured at decoration time.
    """
    qualifier = '{}.{}'.format(func.__module__, func.__name__)
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = _time_perf_counter()
        result = func(*args, **kwargs)
        elapsed = _time_perf_counter() - start
        print('timethis : <{}> : {}'.format(qualifier, elapsed))
        return result
    return wrapper
A wrapper use for timeit.
def find_needed_formatter(input_format, output_format):
    """Find a data formatter given an input and output format.

    input_format - needed input format (see utils.input.dataformats)
    output_format - needed output format (see utils.input.dataformats)

    Returns the first matching formatter class, or None.
    """
    candidates = [entry.cls for entry in registry
                  if entry.category == RegistryCategories.formatters]
    matches = []
    # Instantiate every candidate once to inspect its supported formats.
    for formatter_cls in candidates:
        instance = formatter_cls()
        if (input_format in instance.input_formats
                and output_format in instance.output_formats):
            matches.append(formatter_cls)
    if matches:
        return matches[0]
    return None
Find a data formatter given an input and output format input_format - needed input format. see utils.input.dataformats output_format - needed output format. see utils.input.dataformats
def check_with_pep8(source_code, filename=None):
    """Check source code with pycodestyle.

    Returns the checker results, or an empty list when the checker
    cannot be run (the traceback is printed only in DEBUG_EDITOR mode).
    """
    try:
        checker_args = get_checker_executable('pycodestyle')
        return check(checker_args, source_code, filename=filename,
                     options=['-r'])
    except Exception:
        if DEBUG_EDITOR:
            traceback.print_exc()
        return []
Check source code with pycodestyle
def get_git_root(index=3):
    """Get the path of the git root directory of the caller's file.

    Walks upward from the caller's path until a directory containing
    '.git' is found.

    :param index: stack depth forwarded to get_current_path
    :raises RuntimeError: if no git repository encloses the path
    """
    path = get_current_path(index=index)
    while not os.path.isdir(os.path.join(path, '.git')):
        if os.path.dirname(path) == path:
            # Reached the filesystem root without finding a repository.
            raise RuntimeError("Cannot find git root")
        path = os.path.split(path)[0]
    return path
Get the path of the git root directory of the caller's file Raises a RuntimeError if a git repository cannot be found
def create(self, table_id, schema):
    """Create a table in Google BigQuery given a table id and schema.

    Parameters
    ----------
    table_id : str
        Name of the table to be created
    schema : dict
        Table schema (e.g. from generate_bq_schema); fields without an
        explicit mode are defaulted to NULLABLE (mutates the passed
        schema in place)

    Raises
    ------
    TableCreationError
        If a table with the same name already exists
    """
    from google.cloud.bigquery import SchemaField
    from google.cloud.bigquery import Table
    if self.exists(table_id):
        raise TableCreationError(
            "Table {0} already " "exists".format(table_id)
        )
    # Lazily create the containing dataset if it does not exist yet.
    if not _Dataset(self.project_id, credentials=self.credentials).exists(
        self.dataset_id
    ):
        _Dataset(
            self.project_id,
            credentials=self.credentials,
            location=self.location,
        ).create(self.dataset_id)
    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    table = Table(table_ref)
    # Every schema field needs a mode; default missing ones to NULLABLE.
    for field in schema["fields"]:
        if "mode" not in field:
            field["mode"] = "NULLABLE"
    table.schema = [
        SchemaField.from_api_repr(field) for field in schema["fields"]
    ]
    try:
        self.client.create_table(table)
    except self.http_error as ex:
        self.process_http_error(ex)
Create a table in Google BigQuery given a table and schema Parameters ---------- table : str Name of table to be written schema : str Use the generate_bq_schema to generate your table schema from a dataframe.
def print_details(self):
    """Print torrent details, one labelled field per line."""
    # Labels keep their original (inconsistent) trailing spaces so the
    # printed output is byte-identical.
    fields = (
        ("Title:", self.title),
        ("Category:", self.category),
        ("Page: ", self.page),
        ("Size: ", self.size),
        ("Files: ", self.files),
        ("Age: ", self.age),
        ("Seeds:", self.seeders),
        ("Leechers: ", self.leechers),
        ("Magnet: ", self.magnet),
        ("Download: ", self.download),
        ("Verified:", self.isVerified),
    )
    for label, value in fields:
        print(label, value)
Print torrent details
def undo(self, key_value, modifier_mask):
    """Undo for the selected state-machine, unless a source editor has focus.

    If any open source- or description-editor text view in the
    states-editor reacts to the key event, the event belongs to that
    editor and no state-machine undo is performed.

    :return: True if an undo was performed, False if a focused editor
        consumed the event (None when no state machine is selected).
    :rtype: bool
    """
    # Let a focused source/description text view claim the shortcut.
    for key, tab in gui_singletons.main_window_controller.get_controller('states_editor_ctrl').tabs.items():
        if tab['controller'].get_controller('source_ctrl') is not None and \
                react_to_event(self.view, tab['controller'].get_controller('source_ctrl').view.textview,
                               (key_value, modifier_mask)) or \
                tab['controller'].get_controller('description_ctrl') is not None and \
                react_to_event(self.view, tab['controller'].get_controller('description_ctrl').view.textview,
                               (key_value, modifier_mask)):
            return False
    if self._selected_sm_model is not None:
        self._selected_sm_model.history.undo()
        return True
    else:
        logger.debug("Undo is not possible now as long as no state_machine is selected.")
Undo for selected state-machine if no state-source-editor is open and focused in states-editor-controller. :return: True if a undo was performed, False if focus on source-editor. :rtype: bool
def run_ps(self, script):
    """Execute a PowerShell script remotely.

    The script is UTF-16LE encoded, base64 encoded, and handed to
    ``powershell -encodedcommand``; a non-empty stderr is cleaned
    before the result is returned.
    """
    encoded = b64encode(script.encode('utf_16_le')).decode('ascii')
    rs = self.run_cmd('powershell -encodedcommand {0}'.format(encoded))
    if len(rs.std_err):
        rs.std_err = self._clean_error_msg(rs.std_err)
    return rs
Encode a PowerShell script as base64 (UTF-16LE) and execute it via the ``powershell -encodedcommand`` command.
def exists(self, filename):
    """Check the JSS for a package or script with this filename.

    JDS/CDP distribution points expose no direct file check, so this
    looks for an API object whose 'filename' element matches. Note a
    matching object can exist even if no file was ever uploaded, and
    retrieving the full object list can be slow; see exists_with_casper
    for an alternative.
    """
    if is_package(filename):
        candidates = self.connection["jss"].Package().retrieve_all()
    else:
        candidates = self.connection["jss"].Script().retrieve_all()
    return any(obj.findtext("filename") == filename for obj in candidates)
Check for the existence of a package or script. Unlike other DistributionPoint types, JDS and CDP types have no documented interface for checking whether the server and its children have a complete copy of a file. The best we can do is check for an object using the API /packages URL--JSS.Package() or /scripts and look for matches on the filename. If this is not enough, please use the alternate exists_with_casper method. For example, it's possible to create a Package object but never upload a package file, and this method will still return "True". Also, this may be slow, as it needs to retrieve the complete list of packages from the server.
def set_index(self, index):
    """Set the current index to the row of the given index.

    Selects the index, emits the new_root signal, then scrolls to it.

    :param index: the index to set the level to
    :type index: QtCore.QModelIndex
    :returns: None
    :rtype: None
    """
    self.setCurrentIndex(index)
    self.new_root.emit(index)
    self.scrollTo(index)
Set the current index to the row of the given index :param index: the index to set the level to :type index: QtCore.QModelIndex :returns: None :rtype: None :raises: None
def _load_type_counts(self):
    """Load the table of frequency counts of word forms.

    Populates self.type_counts with {word: count} read from the
    oe.counts model file under CLTK_DATA_DIR.
    """
    rel_path = os.path.join(CLTK_DATA_DIR, 'old_english', 'model',
                            'old_english_models_cltk', 'data', 'oe.counts')
    path = os.path.expanduser(rel_path)
    self.type_counts = {}
    with open(path, 'r') as infile:
        # Each line holds "<count> <word>".
        for line in infile:
            count, word = line.split()
            self.type_counts[word] = int(count)
Load the table of frequency counts of word forms.
def _vector_pattern_uniform_op_left(func):
    """Decorator for operator overloading with the pattern on the left.

    The wrapped operator is applied separately to the theta- and
    phi-component double-spheres. The right operand may be another
    TransversePatternUniform (shapes must match) or a plain number,
    which is broadcast to both components.

    Raises:
        ValueError: when both operands are patterns of different size.
        TypeError: for any other right-operand type.
    """
    @wraps(func)
    def verif(self, patt):
        if isinstance(patt, TransversePatternUniform):
            # Pattern (op) pattern: only defined for equal sphere shapes.
            if self._tdsphere.shape == patt._tdsphere.shape:
                return TransversePatternUniform(func(self, self._tdsphere, patt._tdsphere),
                                                func(self, self._pdsphere, patt._pdsphere),
                                                doublesphere=True)
            else:
                raise ValueError(err_msg['VP_sz_msmtch'] % \
                    (self.nrows, self.ncols, patt.nrows, patt.ncols))
        elif isinstance(patt, numbers.Number):
            # Pattern (op) scalar: apply the scalar to both components.
            return TransversePatternUniform(func(self, self._tdsphere, patt),
                                            func(self, self._pdsphere, patt),
                                            doublesphere=True)
        else:
            raise TypeError(err_msg['no_combi_VP'])
    return verif
decorator for operator overloading when VectorPatternUniform is on the left
def run_sls_remove(sls_cmd, env_vars):
    """Run an sls remove command and exit on real failures.

    A return code of 1 caused by the stack not existing is tolerated
    (removing an absent stack counts as success); every other non-zero
    return code terminates the process with that code.

    :param sls_cmd: serverless command to execute (argument list)
    :param env_vars: environment mapping passed to the subprocess
    """
    sls_process = subprocess.Popen(sls_cmd, stdout=subprocess.PIPE,
                                   env=env_vars)
    stdoutdata, _stderrdata = sls_process.communicate()
    sls_return = sls_process.wait()
    print(stdoutdata)
    # Bug fix: the original condition `rc != 0 and (rc == 1 and not
    # match)` could only ever exit when rc == 1, silently swallowing
    # every other failure code.
    # NOTE(review): under Python 3 stdoutdata is bytes; the regex below
    # assumes text output -- confirm the intended interpreter.
    if sls_return != 0 and not (
            sls_return == 1
            and re.search(r"Stack '.*' does not exist", stdoutdata)):
        sys.exit(sls_return)
Run sls remove command.
def list(self, full=False):
    """List all of the bundles in the remote.

    Delegates to the API-backed or filesystem-backed listing depending
    on how this remote is configured.

    :param full: whether to return full bundle details
    """
    if self.is_api:
        return self._list_api(full=full)
    return self._list_fs(full=full)
List all of the bundles in the remote