code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def update(self, key, item): return lib.zhash_update(self._as_parameter_, key, item)
Update item into hash table with specified key and item. If key is already present, destroys old item and inserts new one. Use free_fn method to ensure deallocator is properly called on item.
def array(self, size, type, name, *parameters): self._new_list(size, name) BuiltIn().run_keyword(type, '', *parameters) self._end_list()
Define a new array of given `size` and containing fields of type `type`. `name` is the name of this array element. The `type` is the name of keyword that is executed as the contents of the array and optional extra parameters are passed as arguments to this keyword. Examples: | Array | 8 | u16 | myArray | | u32 | length | | Array | length | someStruct | myArray | <argument for someStruct> |
def get_psd(self, omega): w = np.asarray(omega) (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag) = self.coefficients p = get_psd_value( alpha_real, beta_real, alpha_complex_real, alpha_complex_imag, beta_complex_real, beta_complex_imag, w.flatten(), ) return p.reshape(w.shape)
Compute the PSD of the term for an array of angular frequencies Args: omega (array[...]): An array of frequencies where the PSD should be evaluated. Returns: The value of the PSD for each ``omega``. This will have the same shape as ``omega``.
def read(self, cnt=None): if cnt is None or cnt < 0: cnt = self._remain elif cnt > self._remain: cnt = self._remain if cnt == 0: return EMPTY data = self._read(cnt) if data: self._md_context.update(data) self._remain -= len(data) if len(data) != cnt: raise BadRarFile("Failed the read enough data") if not data or self._remain == 0: self._check() return data
Read all or specified amount of data from archive entry.
def draw(self): self.context.set_line_cap(cairo.LINE_CAP_SQUARE) self.context.save() self.context.rectangle(*self.rect) self.context.clip() cell_borders = CellBorders(self.cell_attributes, self.key, self.rect) borders = list(cell_borders.gen_all()) borders.sort(key=attrgetter('width', 'color')) for border in borders: border.draw(self.context) self.context.restore()
Draws cell border to context
def select(self, columns=(), by=(), where=(), **kwds): return self._seu('select', columns, by, where, kwds)
select from self >>> t = q('([]a:1 2 3; b:10 20 30)') >>> t.select('a', where='b > 20').show() a - 3
def unesc(line, language): comment = _COMMENT[language] if line.startswith(comment + ' '): return line[len(comment) + 1:] if line.startswith(comment): return line[len(comment):] return line
Uncomment a commented line once (remove a single level of comment marker)
def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
Return True and delete children if small enough.
def score(self, X, y=None, train=False, **kwargs): score = self.estimator.score(X, y, **kwargs) if train: self.train_score_ = score else: self.test_score_ = score y_pred = self.predict(X) scores = y_pred - y self.draw(y_pred, scores, train=train) return score
Generates predicted target values using the Scikit-Learn estimator. Parameters ---------- X : array-like X (also X_test) are the dependent variables of test set to predict y : array-like y (also y_test) is the independent actual variables to score against train : boolean If False, `score` assumes that the residual points being plotted are from the test data; if True, `score` assumes the residuals are the train data. Returns ------ score : float The score of the underlying estimator, usually the R-squared score for regression estimators.
def roc(y_true, y_score, ax=None): if any((val is None for val in (y_true, y_score))): raise ValueError("y_true and y_score are needed to plot ROC") if ax is None: ax = plt.gca() y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score) if y_score_is_vector: n_classes = 2 else: _, n_classes = y_score.shape if n_classes > 2: y_true_bin = label_binarize(y_true, classes=np.unique(y_true)) _roc_multi(y_true_bin, y_score, ax=ax) for i in range(n_classes): _roc(y_true_bin[:, i], y_score[:, i], ax=ax) else: if y_score_is_vector: _roc(y_true, y_score, ax) else: _roc(y_true, y_score[:, 1], ax) return ax
Plot ROC curve. Parameters ---------- y_true : array-like, shape = [n_samples] Correct target values (ground truth). y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary classification or [n_samples, n_classes] for multiclass Target scores (estimator predictions). ax: matplotlib Axes Axes object to draw the plot onto, otherwise uses current Axes Notes ----- It is assumed that the y_score parameter columns are in order. For example, if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score must contain the scores for class 0, second column for class 1 and so on. Returns ------- ax: matplotlib Axes Axes containing the plot Examples -------- .. plot:: ../../examples/roc.py
def yank_last_arg(event): n = (event.arg if event.arg_present else None) event.current_buffer.yank_last_arg(n)
Like `yank_nth_arg`, but if no argument has been given, yank the last word of each line.
def _replace_missing_values_column(values, mv): for idx, v in enumerate(values): try: if v in EMPTY or v == mv: values[idx] = "nan" elif math.isnan(float(v)): values[idx] = "nan" else: values[idx] = v except (TypeError, ValueError): values[idx] = v return values
Replace missing values in the values list where applicable :param list values: Metadata (column values) :return list values: Metadata (column values)
def get_content_type(self, content_type): qs = self.get_queryset() return qs.filter(content_type__name=content_type)
Get all the items of the given content type related to this item.
def data_import(self, json_response): if 'data' not in json_response: raise PyVLXException('no element data found: {0}'.format( json.dumps(json_response))) data = json_response['data'] for item in data: if 'category' not in item: raise PyVLXException('no element category: {0}'.format( json.dumps(item))) category = item['category'] if category == 'Window opener': self.load_window_opener(item) elif category in ['Roller shutter', 'Dual Shutter']: self.load_roller_shutter(item) elif category in ['Blind']: self.load_blind(item) else: self.pyvlx.logger.warning( 'WARNING: Could not parse product: %s', category)
Import data from json response.
def file_needs_update(target_file, source_file): if not os.path.isfile(target_file) or get_md5_file_hash(target_file) != get_md5_file_hash(source_file): return True return False
Checks if target_file is not existing or differing from source_file :param target_file: File target for a copy action :param source_file: File to be copied :return: True, if target_file not existing or differing from source_file, else False :rtype: bool
def update(self, url, doc): if self.pid.is_deleted(): logger.info("Reactivate in DataCite", extra=dict(pid=self.pid)) try: self.api.metadata_post(doc) self.api.doi_post(self.pid.pid_value, url) except (DataCiteError, HttpError): logger.exception("Failed to update in DataCite", extra=dict(pid=self.pid)) raise if self.pid.is_deleted(): self.pid.sync_status(PIDStatus.REGISTERED) logger.info("Successfully updated in DataCite", extra=dict(pid=self.pid)) return True
Update metadata associated with a DOI. This can be called before/after a DOI is registered. :param doc: Set metadata for DOI. :returns: `True` if is updated successfully.
def plot(self, pts_per_edge, color=None, ax=None, with_nodes=False): if self._dimension != 2: raise NotImplementedError( "2D is the only supported dimension", "Current dimension", self._dimension, ) if ax is None: ax = _plot_helpers.new_axis() _plot_helpers.add_patch(ax, color, pts_per_edge, *self._get_edges()) if with_nodes: ax.plot( self._nodes[0, :], self._nodes[1, :], color="black", marker="o", linestyle="None", ) return ax
Plot the current surface. Args: pts_per_edge (int): Number of points to plot per edge. color (Optional[Tuple[float, float, float]]): Color as RGB profile. ax (Optional[matplotlib.artist.Artist]): matplotlib axis object to add plot to. with_nodes (Optional[bool]): Determines if the control points should be added to the plot. Off by default. Returns: matplotlib.artist.Artist: The axis containing the plot. This may be a newly created axis. Raises: NotImplementedError: If the surface's dimension is not ``2``.
def get_functions_and_classes(package): classes, functions = [], [] for name, member in inspect.getmembers(package): if not name.startswith('_'): if inspect.isclass(member): classes.append([name, member]) elif inspect.isfunction(member): functions.append([name, member]) return classes, functions
Return lists of functions and classes from a package. Parameters ---------- package : python package object Returns -------- list, list : list of classes and functions Each sublist consists of [name, member] sublists.
def cleanup_dataset(dataset, data_home=None, ext=".zip"): removed = 0 data_home = get_data_home(data_home) datadir = os.path.join(data_home, dataset) archive = os.path.join(data_home, dataset+ext) if os.path.exists(datadir): shutil.rmtree(datadir) removed += 1 if os.path.exists(archive): os.remove(archive) removed += 1 return removed
Removes the dataset directory and archive file from the data home directory. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- removed : int The number of objects removed from data_home.
def parse(version): match = _REGEX.match(version) if match is None: raise ValueError('%s is not valid SemVer string' % version) version_parts = match.groupdict() version_parts['major'] = int(version_parts['major']) version_parts['minor'] = int(version_parts['minor']) version_parts['patch'] = int(version_parts['patch']) return version_parts
Parse version to major, minor, patch, pre-release, build parts. :param version: version string :return: dictionary with the keys 'build', 'major', 'minor', 'patch', and 'prerelease'. The prerelease or build keys can be None if not provided :rtype: dict >>> import semver >>> ver = semver.parse('3.4.5-pre.2+build.4') >>> ver['major'] 3 >>> ver['minor'] 4 >>> ver['patch'] 5 >>> ver['prerelease'] 'pre.2' >>> ver['build'] 'build.4'
def offset_limit(func): def func_wrapper(self, start, stop): offset = start limit = stop - start return func(self, offset, limit) return func_wrapper
Decorator that converts python slicing to offset and limit
def hex2term(hexval: str, allow_short: bool = False) -> str: return rgb2term(*hex2rgb(hexval, allow_short=allow_short))
Convert a hex value into the nearest terminal code number.
def shutdown(self): task = asyncio.ensure_future(self.core.shutdown()) self.loop.run_until_complete(task)
Shutdown the application and exit :returns: No return value
def start(self, zone_id, duration): path = 'zone/start' payload = {'id': zone_id, 'duration': duration} return self.rachio.put(path, payload)
Start a zone.
def add(self, item): if not item.startswith(self.prefix): item = os.path.join(self.base, item) self.files.add(os.path.normpath(item))
Add a file to the manifest. :param item: The pathname to add. This can be relative to the base.
def create_object(self, name, image_sets): identifier = str(uuid.uuid4()).replace('-','') properties = {datastore.PROPERTY_NAME: name} obj = PredictionImageSetHandle(identifier, properties, image_sets) self.insert_object(obj) return obj
Create a prediction image set list. Parameters ---------- name : string User-provided name for the image group. image_sets : list(PredictionImageSet) List of prediction image sets Returns ------- PredictionImageSetHandle Object handle for created prediction image set
def _backup_file(path): backup_base = '/var/local/woven-backup' backup_path = ''.join([backup_base,path]) if not exists(backup_path): directory = ''.join([backup_base,os.path.split(path)[0]]) sudo('mkdir -p %s'% directory) sudo('cp %s %s'% (path,backup_path))
Backup a file but never overwrite an existing backup file
def get_setting(key, *default): if default: return get_settings().get(key, default[0]) else: return get_settings()[key]
Return specific search setting from Django conf.
def get_widget(name): for widget in registry: if widget.__name__ == name: return widget raise WidgetNotFound( _('The widget %s has not been registered.') % name)
Give back a widget class according to its name.
def config_required(f): def new_func(obj, *args, **kwargs): if 'config' not in obj: click.echo(_style(obj.get('show_color', False), 'Could not find a valid configuration file!', fg='red', bold=True)) raise click.Abort() else: return f(obj, *args, **kwargs) return update_wrapper(new_func, f)
Decorator that checks whether a configuration file was set.
def copyNamespaceList(self): ret = libxml2mod.xmlCopyNamespaceList(self._o) if ret is None:raise treeError('xmlCopyNamespaceList() failed') __tmp = xmlNs(_obj=ret) return __tmp
Do a copy of a namespace list.
def __import_vars(self, env_file): with open(env_file, "r") as f: for line in f: try: line = line.lstrip() if line.startswith('export'): line = line.replace('export', '', 1) key, val = line.strip().split('=', 1) except ValueError: pass else: if not callable(val): if self.verbose_mode: if key in self.app.config: print( " * Overwriting an existing config var:" " {0}".format(key)) else: print( " * Setting an entirely new config var:" " {0}".format(key)) self.app.config[key] = re.sub( r"\A[\"']|[\"']\Z", "", val)
Actual importing function.
def text(self, data): data = data middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: yield {"type": "SpaceCharacters", "data": left} data = middle middle = data.rstrip(spaceCharacters) right = data[len(middle):] if middle: yield {"type": "Characters", "data": middle} if right: yield {"type": "SpaceCharacters", "data": right}
Generates SpaceCharacters and Characters tokens Depending on what's in the data, this generates one or more ``SpaceCharacters`` and ``Characters`` tokens. For example: >>> from html5lib.treewalkers.base import TreeWalker >>> # Give it an empty tree just so it instantiates >>> walker = TreeWalker([]) >>> list(walker.text('')) [] >>> list(walker.text(' ')) [{u'data': ' ', u'type': u'SpaceCharacters'}] >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE [{u'data': ' ', u'type': u'SpaceCharacters'}, {u'data': u'abc', u'type': u'Characters'}, {u'data': u' ', u'type': u'SpaceCharacters'}] :arg data: the text data :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
def inject_coordinates(self, x_coords, y_coords, rescale_x=None, rescale_y=None, original_x=None, original_y=None): self._verify_coordinates(x_coords, 'x') self._verify_coordinates(y_coords, 'y') self.x_coords = x_coords self.y_coords = y_coords self._rescale_x = rescale_x self._rescale_y = rescale_y self.original_x = x_coords if original_x is None else original_x self.original_y = y_coords if original_y is None else original_y
Inject custom x and y coordinates for each term into chart. Parameters ---------- x_coords: array-like positions on x-axis \in [0,1] y_coords: array-like positions on y-axis \in [0,1] rescale_x: lambda list[0,1]: list[0,1], default identity Rescales x-axis after filtering rescale_y: lambda list[0,1]: list[0,1], default identity Rescales y-axis after filtering original_x : array-like, optional Original, unscaled x-values. Defaults to x_coords original_y : array-like, optional Original, unscaled y-values. Defaults to y_coords Returns ------- self: ScatterChart
def delete_thumbnails(self, image_file): thumbnail_keys = self._get(image_file.key, identity='thumbnails') if thumbnail_keys: for key in thumbnail_keys: thumbnail = self._get(key) if thumbnail: self.delete(thumbnail, False) thumbnail.delete() self._delete(image_file.key, identity='thumbnails')
Deletes references to thumbnails as well as thumbnail ``image_files``.
def __set_whitelist(self, whitelist=None): self.whitelist = {} self.sanitizelist = ['script', 'style'] if isinstance(whitelist, dict) and '*' in whitelist.keys(): self.isNotPurify = True self.whitelist_keys = [] return else: self.isNotPurify = False self.whitelist.update(whitelist or {}) self.whitelist_keys = self.whitelist.keys()
Update the default white list with a custom white list
def _check_module_is_image_embedding(module_spec): issues = [] input_info_dict = module_spec.get_input_info_dict() if (list(input_info_dict.keys()) != ["images"] or input_info_dict["images"].dtype != tf.float32): issues.append("Module 'default' signature must require a single input, " "which must have type float32 and name 'images'.") else: try: image_util.get_expected_image_size(module_spec) except ValueError as e: issues.append("Module does not support hub.get_expected_image_size(); " "original error was:\n" + str(e)) output_info_dict = module_spec.get_output_info_dict() if "default" not in output_info_dict: issues.append("Module 'default' signature must have a 'default' output.") else: output_type = output_info_dict["default"].dtype output_shape = output_info_dict["default"].get_shape() if not (output_type == tf.float32 and output_shape.ndims == 2 and output_shape.dims[1].value): issues.append("Module 'default' signature must have a 'default' output " "of tf.Tensor(shape=(_,K), dtype=float32).") if issues: raise ValueError("Module is not usable as image embedding: %r" % issues)
Raises ValueError if `module_spec` is not usable as image embedding. Args: module_spec: A `_ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with mapping an "images" input to a Tensor(float32, shape=(_,K)).
def t_php_OBJECT_OPERATOR(t): r'->' if re.match(r'[A-Za-z_]', peek(t.lexer)): t.lexer.push_state('property') return t
r'->'
def dump(self): out = [] out.append(self.filetype) out.append("Format: {}".format(self.version)) out.append("Type: ASCII") out.append("") for cmd in self.commands: out.append(self.encode(cmd)) return "\n".join(out) + "\n"
Dump all commands in this object to a string. Returns: str: An encoded list of commands separated by \n characters suitable for saving to a file.
def concatenate_and_rewrite(self, paths, output_filename, variant=None): stylesheets = [] for path in paths: def reconstruct(match): quote = match.group(1) or '' asset_path = match.group(2) if NON_REWRITABLE_URL.match(asset_path): return "url(%s%s%s)" % (quote, asset_path, quote) asset_url = self.construct_asset_path(asset_path, path, output_filename, variant) return "url(%s)" % asset_url content = self.read_text(path) content = re.sub(URL_DETECTOR, reconstruct, content) stylesheets.append(content) return '\n'.join(stylesheets)
Concatenate together files and rewrite urls
def find_clique_embedding(k, m, n=None, t=None, target_edges=None): import random _, nodes = k m, n, t, target_edges = _chimera_input(m, n, t, target_edges) if len(nodes) == 1: qubits = set().union(*target_edges) qubit = random.choice(tuple(qubits)) embedding = [[qubit]] elif len(nodes) == 2: if not isinstance(target_edges, list): edges = list(target_edges) edge = edges[random.randrange(len(edges))] embedding = [[edge[0]], [edge[1]]] else: embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeClique(len(nodes)) if not embedding: raise ValueError("cannot find a K{} embedding for given Chimera lattice".format(k)) return dict(zip(nodes, embedding))
Find an embedding for a clique in a Chimera graph. Given a target :term:`Chimera` graph size, and a clique (fully connect graph), attempts to find an embedding. Args: k (int/iterable): Clique to embed. If k is an integer, generates an embedding for a clique of size k labelled [0,k-1]. If k is an iterable, generates an embedding for a clique of size len(k), where iterable k is the variable labels. m (int): Number of rows in the Chimera lattice. n (int, optional, default=m): Number of columns in the Chimera lattice. t (int, optional, default 4): Size of the shore within each Chimera tile. target_edges (iterable[edge]): A list of edges in the target Chimera graph. Nodes are labelled as returned by :func:`~dwave_networkx.generators.chimera_graph`. Returns: dict: An embedding mapping a clique to the Chimera lattice. Examples: The first example finds an embedding for a :math:`K_4` complete graph in a single Chimera unit cell. The second for an alphanumerically labeled :math:`K_3` graph in 4 unit cells. >>> from dwave.embedding.chimera import find_clique_embedding ... >>> embedding = find_clique_embedding(4, 1, 1) >>> embedding # doctest: +SKIP {0: [4, 0], 1: [5, 1], 2: [6, 2], 3: [7, 3]} >>> from dwave.embedding.chimera import find_clique_embedding ... >>> embedding = find_clique_embedding(['a', 'b', 'c'], m=2, n=2, t=4) >>> embedding # doctest: +SKIP {'a': [20, 16], 'b': [21, 17], 'c': [22, 18]}
def lists(self, value, key=None): results = map(lambda x: x[value], self._items) return list(results)
Get a list with the values of a given key :rtype: list
def remove(self, objs): if self.readonly: raise NotImplementedError( '{} links can\'t be modified'.format(self._slug) ) if not self._parent.id: raise ObjectNotSavedException( "Links can not be modified before the object has been saved." ) _objs = [obj for obj in self._build_obj_list(objs) if obj in self] if not _objs: return _obj_ids = ",".join(_objs) self._parent.http_delete( '{}/links/{}/{}'.format(self._parent.id, self._slug, _obj_ids), retry=True, ) self._linked_object_ids = [ obj for obj in self._linked_object_ids if obj not in _objs ]
Removes the given `objs` from this `LinkCollection`. - **objs** can be a list of :py:class:`.PanoptesObject` instances, a list of object IDs, a single :py:class:`.PanoptesObject` instance, or a single object ID. Examples:: organization.links.projects.remove(1234) organization.links.projects.remove(Project(1234)) workflow.links.subject_sets.remove([1,2,3,4]) workflow.links.subject_sets.remove([Project(12), Project(34)])
def command_line(): from docopt import docopt doc = docopt( __doc__, version=VERSION ) args = pd.Series({k.replace('--', ''): v for k, v in doc.items()}) if args.all: graph = Graph2Pandas(args.file, _type='all') elif args.type: graph = Graph2Pandas(args.file, _type=args.type) else: graph = Graph2Pandas(args.file) graph.save(args.output)
If you want to use the command line
def get_by(self, field, value): if field == 'userName' or field == 'name': return self._client.get(self.URI + '/' + value) elif field == 'role': value = value.replace(" ", "%20") return self._client.get(self.URI + '/roles/users/' + value)['members'] else: raise HPOneViewException('Only userName, name and role can be queried for this resource.')
Gets all Users that match the filter. The search is case-insensitive. Args: field: Field name to filter. Accepted values: 'name', 'userName', 'role' value: Value to filter. Returns: list: A list of Users.
def process_tick(self, tick_tup): self._tick_counter += 1 self.ack(tick_tup) if self._tick_counter > self.ticks_between_batches and self._batches: self.process_batches() self._tick_counter = 0
Increment tick counter, and call ``process_batch`` for all current batches if tick counter exceeds ``ticks_between_batches``. See :class:`pystorm.component.Bolt` for more information. .. warning:: This method should **not** be overriden. If you want to tweak how Tuples are grouped into batches, override ``group_key``.
def solve_each(expr, vars): lhs_values, _ = __solve_for_repeated(expr.lhs, vars) for lhs_value in repeated.getvalues(lhs_values): result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)) if not result.value: return result._replace(value=False) return Result(True, ())
Return True if RHS evaluates to a true value with each state of LHS. If LHS evaluates to a normal IAssociative object then this is the same as a regular let-form, except the return value is always a boolean. If LHS evaluates to a repeated var (see efilter.protocols.repeated) of IAssociative objects then RHS will be evaluated with each state and True will be returned only if each result is true.
def setParentAnalysisRequest(self, value): self.Schema().getField("ParentAnalysisRequest").set(self, value) if not value: noLongerProvides(self, IAnalysisRequestPartition) else: alsoProvides(self, IAnalysisRequestPartition)
Sets a parent analysis request, making the current a partition
def error(self, line_number, offset, text, check): code = super(_PycodestyleReport, self).error( line_number, offset, text, check) if code: self.errors.append(dict( text=text, type=code.replace('E', 'C'), col=offset + 1, lnum=line_number, ))
Save errors.
def set_walltime(self, walltime): if not isinstance(walltime, timedelta): raise TypeError( 'walltime must be an instance of datetime.timedelta. %s given' % type(walltime) ) self._options['walltime'] = walltime return self
Setting a walltime for the job >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30)) :param walltime: Walltime of the job (an instance of timedelta) :returns: self :rtype: self
def recv_rpc(self, context, payload): logger.debug("Adding RPC payload to ControlBuffer queue: %s", payload) self.buf.put(('rpc', (context, payload))) with self.cv: self.cv.notifyAll()
Call from any thread
def _process_pod_rate(self, metric_name, metric, scraper_config): if metric.type not in METRIC_TYPES: self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name)) return samples = self._sum_values_by_context(metric, self._get_pod_uid_if_pod_metric) for pod_uid, sample in iteritems(samples): if '.network.' in metric_name and self._is_pod_host_networked(pod_uid): continue tags = tagger.tag('kubernetes_pod://%s' % pod_uid, tagger.HIGH) tags += scraper_config['custom_tags'] val = sample[self.SAMPLE_VALUE] self.rate(metric_name, val, tags)
Takes a simple metric about a pod, reports it as a rate. If several series are found for a given pod, values are summed before submission.
def index_resources(self): if not self.__index_resources: self.__index_resources = IndexResources(self.__connection) return self.__index_resources
Gets the Index Resources API client. Returns: IndexResources:
def rebase(self, text, char='X'): regexp = re.compile(r'\b(%s)\b' % '|'.join(self.collection), re.IGNORECASE | re.UNICODE) def replace(m): word = m.group(1) return char * len(word) return regexp.sub(replace, text)
Rebases text with stop words removed.
def fill_subparser(subparser): urls = ([None] * len(ALL_FILES)) filenames = list(ALL_FILES) subparser.set_defaults(urls=urls, filenames=filenames) subparser.add_argument('-P', '--url-prefix', type=str, default=None, help="URL prefix to prepend to the filenames of " "non-public files, in order to download them. " "Be sure to include the trailing slash.") return default_downloader
Sets up a subparser to download the ILSVRC2012 dataset files. Note that you will need to use `--url-prefix` to download the non-public files (namely, the TARs of images). This is a single prefix that is common to all distributed files, which you can obtain by registering at the ImageNet website [DOWNLOAD]. Note that these files are quite large and you may be better off simply downloading them separately and running ``fuel-convert``. .. [DOWNLOAD] http://www.image-net.org/download-images Parameters ---------- subparser : :class:`argparse.ArgumentParser` Subparser handling the `ilsvrc2012` command.
def build_query(self, sql, lookup): for key, val in six.iteritems(lookup): sql = sql.replace("$" + key, val) return sql
Modify table and field name variables in a sql string with a dict. This seems to be discouraged by psycopg2 docs but it makes small adjustments to large sql strings much easier, making prepped queries much more versatile. USAGE sql = 'SELECT $myInputField FROM $myInputTable' lookup = {'myInputField':'customer_id', 'myInputTable':'customers'} sql = db.build_query(sql, lookup)
def distros_for_filename(filename, metadata=None): return distros_for_location( normalize_path(filename), os.path.basename(filename), metadata )
Yield possible egg or source distribution objects based on a filename
def get_report_rst(self): res = '' res += '-----------------------------------\n' res += self.nme + '\n' res += '-----------------------------------\n\n' res += self.desc + '\n' res += self.fldr + '\n\n' res += '.. contents:: \n\n\n' res += 'Overview\n' + '===========================================\n\n' res += 'This document contains details on the project ' + self.nme + '\n\n' for d in self.details: res += ' - ' + d[0] + ' = ' + d[1] + '\n\n' res += '\nTABLES\n' + '===========================================\n\n' for t in self.datatables: res += t.name + '\n' res += '-------------------------\n\n' res += t.format_rst() + '\n\n' return res
formats the project into a report in RST format
def parse_address(text: str) -> Tuple[str, int]: match = re.search( r'\(' r'(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*,' r'\s*(\d{1,3})\s*' r'\)', text) if match: return ( '{0}.{1}.{2}.{3}'.format(int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4)) ), int(match.group(5)) << 8 | int(match.group(6)) ) else: raise ValueError('No address found')
Parse PASV address.
def check_and_consume(self): if self._count < 1.0: self._fill() consumable = self._count >= 1.0 if consumable: self._count -= 1.0 self.throttle_count = 0 else: self.throttle_count += 1 return consumable
Returns True if there is currently at least one token, and reduces it by one.
def removeSubscriber(self, email): headers, raw_data = self._perform_subscribe() missing_flag, raw_data = self._remove_subscriber(email, raw_data) if missing_flag: return self._update_subscribe(headers, raw_data) self.log.info("Successfully remove a subscriber: %s for <Workitem %s>", email, self)
Remove a subscriber from this workitem If the subscriber has not been added, no more actions will be performed. :param email: the subscriber's email
def _make_request(self, opener, request, timeout=None): timeout = timeout or self.timeout try: return opener.open(request, timeout=timeout) except HTTPError as err: exc = handle_error(err) exc.__cause__ = None raise exc
Make the API call and return the response. This is separated into it's own function, so we can mock it easily for testing. :param opener: :type opener: :param request: url payload to request :type request: urllib.Request object :param timeout: timeout value or None :type timeout: float :return: urllib response
def generate_certificate(self, common_name, public_key_algorithm='rsa', signature_algorithm='rsa_sha_512', key_length=2048, signing_ca=None): return GatewayCertificate._create(self, common_name, public_key_algorithm, signature_algorithm, key_length, signing_ca)
Generate an internal gateway certificate used for VPN on this engine. Certificate request should be an instance of VPNCertificate. :param: str common_name: common name for certificate :param str public_key_algorithm: public key type to use. Valid values rsa, dsa, ecdsa. :param str signature_algorithm: signature algorithm. Valid values dsa_sha_1, dsa_sha_224, dsa_sha_256, rsa_md5, rsa_sha_1, rsa_sha_256, rsa_sha_384, rsa_sha_512, ecdsa_sha_1, ecdsa_sha_256, ecdsa_sha_384, ecdsa_sha_512. (Default: rsa_sha_512) :param int key_length: length of key. Key length depends on the key type. For example, RSA keys can be 1024, 2048, 3072, 4096. See SMC documentation for more details. :param str,VPNCertificateCA signing_ca: by default will use the internal RSA CA :raises CertificateError: error generating certificate :return: GatewayCertificate
def variant_to_list(obj):
    """Return a list containing the descriptors in the given object.

    ``obj`` can be a list or a set of descriptor strings, or a Unicode
    string.  A Unicode string is split on whitespace.

    :param variant obj: the object to be parsed
    :rtype: list
    :raise TypeError: if ``obj`` has a type not listed above
    """
    if isinstance(obj, list):
        return obj
    if is_unicode_string(obj):
        # Split on whitespace, discarding empty tokens.
        return [token for token in obj.split() if len(token) > 0]
    if isinstance(obj, (set, frozenset)):
        return list(obj)
    raise TypeError("The given value must be a list or a set of descriptor strings, or a Unicode string.")
Return a list containing the descriptors in the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: list :raise TypeError: if the ``obj`` has a type not listed above
def _create_word_graph_file(name, file_storage, word_set):
    """Create a word graph file and reopen it from a RAM copy of the storage."""
    graph_file = file_storage.create_file(name)
    # Whoosh requires the word list to be sorted before graph creation.
    spelling.wordlist_to_graph_file(sorted(word_set), graph_file)
    ram_storage = copy_to_ram(file_storage)
    return ram_storage.open_file(name)
Create a word graph file and open it in memory.
def redirect_to_lang(*args, **kwargs):
    """Redirect non lang-prefixed urls to the default language."""
    # Strip the '_redirect' suffix to reach the language-aware endpoint.
    target_endpoint = request.endpoint.replace('_redirect', '')
    params = multi_to_dict(request.args)
    params.update(request.view_args)
    params['lang_code'] = default_lang
    return redirect(url_for(target_endpoint, **params))
Redirect non lang-prefixed urls to default language.
def _control_line(self, line):
    """Clamp the requested line to the sensor's valid pixel range."""
    if line > float(self.LINE_LAST_PIXEL):
        return int(self.LINE_LAST_PIXEL)
    if line < float(self.LINE_FIRST_PIXEL):
        return int(self.LINE_FIRST_PIXEL)
    # Already inside the valid range: return unchanged (not coerced to int).
    return line
Control the asked line is ok
def message_interactions(self):
    """Access the message_interactions list, building it lazily on first use.

    :returns: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionList
    :rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionList
    """
    if self._message_interactions is not None:
        return self._message_interactions
    solution = self._solution
    self._message_interactions = MessageInteractionList(
        self._version,
        service_sid=solution['service_sid'],
        session_sid=solution['session_sid'],
        participant_sid=solution['sid'],
    )
    return self._message_interactions
Access the message_interactions :returns: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionList :rtype: twilio.rest.proxy.v1.service.session.participant.message_interaction.MessageInteractionList
def list_users(self):
    """List all the user ids of all users in the bucket."""
    endpoint = self.bucket_url + '/_user/'
    # Return the decoded JSON body directly.
    return requests.get(endpoint).json()
a method to list all the user ids of all users in the bucket
def from_string(species_string: str):
    """Return a Specie from a string representation.

    Args:
        species_string (str): A typical string representation of a
            species, e.g., "Mn2+", "Fe3+", "O2-".

    Returns:
        A Specie object.

    Raises:
        ValueError if species_string cannot be interpreted.
    """
    match = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string)
    if not match:
        raise ValueError("Invalid Species String")
    symbol = match.group(1)
    # An absent magnitude (e.g. "Fe+") means an oxidation state of 1.
    magnitude = 1 if match.group(2) == "" else float(match.group(2))
    oxidation = -magnitude if match.group(3) == "-" else magnitude
    properties = None
    extra = match.group(4)
    if extra:
        # Single key=value property, e.g. "spin=5".
        key, _, value = extra.replace(",", "").partition("=")
        properties = {key: float(value)}
    return Specie(symbol, oxidation, properties)
Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted.
def copy_node_info(src, dest):
    """Copy AST position and parent information from *src* to *dest*.

    Every node in the AST needs line number information; this transfers
    whatever location attributes *src* carries onto *dest*.
    """
    for name in ('lineno', 'fromlineno', 'tolineno', 'col_offset', 'parent'):
        # Only copy attributes the source node actually has.
        if hasattr(src, name):
            setattr(dest, name, getattr(src, name))
Copy information from src to dest Every node in the AST has to have line number information. Get the information from the old stmt.
def _get_callargs(self, *args, **kwargs):
    """Map the given arguments onto ``self.func``'s signature.

    :return: dict of call arguments as produced by ``getcallargs``.
    """
    return getcallargs(self.func, *args, **kwargs)
Retrieve all arguments that `self.func` needs and return a dictionary with call arguments.
def setup_actions(self):
    """Connect menu actions and tab-widget signals to their handler slots."""
    # File menu actions.
    self.actionOpen.triggered.connect(self.on_open)
    self.actionNew.triggered.connect(self.on_new)
    self.actionSave.triggered.connect(self.on_save)
    self.actionSave_as.triggered.connect(self.on_save_as)
    # Quit goes straight to the application instance.
    self.actionQuit.triggered.connect(QtWidgets.QApplication.instance().quit)
    # Keep editor state in sync with the active tab.
    self.tabWidget.current_changed.connect(
        self.on_current_tab_changed)
    self.actionAbout.triggered.connect(self.on_about)
Connects slots to signals
def t_newline(self, t):
    r'\n'
    # NOTE: the docstring above IS the PLY token regex -- do not edit it.
    # Track line numbers and remember the position of the last newline so
    # that column offsets can be computed later.
    t.lexer.lineno += 1
    t.lexer.latest_newline = t.lexpos
r'\n
def import_keypair(kwargs=None, call=None):
    """Upload a public key to the cloud provider (like EC2 import_keypair).

    .. versionadded:: 2016.11.0

    kwargs
        file(mandatory): public key file-name
        keyname(mandatory): public key name in the provider
    """
    # Read the public key file as unicode text.
    with salt.utils.files.fopen(kwargs['file'], 'r') as keyfile:
        pubkey = salt.utils.stringutils.to_unicode(keyfile.read())
    digitalocean_kwargs = {
        'name': kwargs['keyname'],
        'public_key': pubkey,
    }
    return create_key(digitalocean_kwargs, call=call)
Upload public key to cloud provider. Similar to EC2 import_keypair. .. versionadded:: 2016.11.0 kwargs file(mandatory): public key file-name keyname(mandatory): public key name in the provider
def config(self, config):
    """Apply every (section, variable, value) triple from *config*."""
    for section in config:
        for variable, value in config[section].items():
            self.set_value(section, variable, value)
Set config values from config dictionary.
def delete_proficiency(self, proficiency_id):
    """Deletes a ``Proficiency``.

    arg:    proficiency_id (osid.id.Id): the ``Id`` of the
            ``Proficiency`` to remove
    raise:  NotFound - ``proficiency_id`` not found
    raise:  NullArgument - ``proficiency_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('learning',
                                    collection='Proficiency',
                                    runtime=self._runtime)
    if not isinstance(proficiency_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    # Look the record up first (applying the view filter) so a NotFound
    # surfaces before anything is removed.
    proficiency_map = collection.find_one(
        dict({'_id': ObjectId(proficiency_id.get_identifier())},
             **self._view_filter()))
    # Run the object's own delete hooks before removing the record.
    objects.Proficiency(osid_object_map=proficiency_map,
                        runtime=self._runtime,
                        proxy=self._proxy)._delete()
    collection.delete_one({'_id': ObjectId(proficiency_id.get_identifier())})
Deletes a ``Proficiency``. arg: proficiency_id (osid.id.Id): the ``Id`` of the ``Proficiency`` to remove raise: NotFound - ``proficiency_id`` not found raise: NullArgument - ``proficiency_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def make_random(cls):
    """Return a Card instance with a random rank and suit."""
    # Bypass __init__ and populate the fields directly.
    card = object.__new__(cls)
    card.rank = Rank.make_random()
    card.suit = Suit.make_random()
    return card
Returns a random Card instance.
def _read_addr_resolve(self, length, htype):
    """Resolve the hardware address according to hardware type.

    Positional arguments:
        * length -- int, hardware address length
        * htype -- int, hardware type

    Returns:
        * str -- MAC address (dash-separated hex for Ethernet)
    """
    if htype == 1:
        # Ethernet: 6 raw bytes rendered as dash-separated hex pairs.
        hexstr = self._read_fileng(6).hex()
        return '-'.join(hexstr[i:i + 2] for i in range(0, len(hexstr), 2))
    return self._read_fileng(length)
Resolve MAC address according to protocol. Positional arguments: * length -- int, hardware address length * htype -- int, hardware type Returns: * str -- MAC address
def index():
    """Basic test view."""
    identity = g.identity
    # Map every known action to whether the current identity may perform it.
    actions = {
        action.value: DynamicPermission(action).allows(identity)
        for action in access.actions.values()
    }
    if current_user.is_anonymous:
        return render_template("invenio_access/open.html",
                               actions=actions,
                               identity=identity)
    return render_template("invenio_access/limited.html",
                           message='',
                           actions=actions,
                           identity=identity)
Basic test view.
def autodiscover_media_extensions():
    """Auto-discover ``media_extensions`` modules in INSTALLED_APPS.

    Fails silently when an app has no ``media_extension`` module, but
    re-raises when the module exists and its import crashed.  Rip-off of
    ``django.contrib.admin.autodiscover()``.
    """
    from django.conf import settings
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        try:
            import_module('%s.media_extension' % app)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are never swallowed.  (Unused `import copy` also removed.)
        except Exception:
            # Only swallow the error when the app simply has no
            # media_extension submodule; a broken module must surface.
            if module_has_submodule(mod, 'media_extension'):
                raise
Auto-discover INSTALLED_APPS media_extensions.py modules and fail silently when not present. This forces an import on them to register any media extension bits they may want. Rip of django.contrib.admin.autodiscover()
def check_honeypot(func=None, field_name=None):
    """Check request.POST for valid honeypot field.

    Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if
    not specified.  Usable both as ``@check_honeypot`` and as
    ``@check_honeypot('field')``.
    """
    # Support @check_honeypot('field'): when called with a string, the
    # positional argument is the field name, not the wrapped function.
    if isinstance(func, six.string_types):
        func, field_name = field_name, func

    def decorated(func):
        def inner(request, *args, **kwargs):
            # verify_honeypot_value returns a response on failure;
            # short-circuit with it instead of calling the view.
            response = verify_honeypot_value(request, field_name)
            if response:
                return response
            else:
                return func(request, *args, **kwargs)
        return wraps(func, assigned=available_attrs(func))(inner)

    if func is None:
        # Called with arguments only -> return a decorator.
        def decorator(func):
            return decorated(func)
        return decorator
    return decorated(func)
Check request.POST for valid honeypot field. Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if not specified.
def _apply_line_rules(lines, commit, rules, line_nr_start):
    """Validate each rule against each line, tagging violations with line numbers."""
    found = []
    for offset, line in enumerate(lines):
        current_nr = line_nr_start + offset
        for rule in rules:
            # rule.validate may return None or a list of violations.
            for violation in rule.validate(line, commit) or []:
                violation.line_nr = current_nr
                found.append(violation)
    return found
Iterates over the lines in a given list of lines and validates a given list of rules against each line
def set_all_curriculums_to_lesson_num(self, lesson_num):
    """Set every curriculum in this meta curriculum to a lesson number.

    Args:
        lesson_num (int): The lesson number which all the curriculums
            will be set to.
    """
    # Only the curriculum objects are needed: iterate values() directly
    # instead of unpacking items() and discarding the brain-name keys.
    for curriculum in self.brains_to_curriculums.values():
        curriculum.lesson_num = lesson_num
Sets all the curriculums in this meta curriculum to a specified lesson number. Args: lesson_num (int): The lesson number which all the curriculums will be set to.
async def set(self, *args, **kwargs):
    """Sets the value of the event.

    Awaits the result when the wrapped event's ``set`` returns an
    awaitable; otherwise returns its plain value.
    """
    return await _maybe_await(self.event.set(*args, **kwargs))
Sets the value of the event.
def welcome(self, user):
    """Welcome a user to the room server and log the event."""
    greeting = 'Please welcome {name} to the server!\nThere are currently {i} users online -\n {r}\n'.format(
        name=user.id,
        i=self.amount_of_users_connected,
        r=' '.join(self.user_names))
    self.send_message(create_message('RoomServer', greeting))
    logging.debug('Welcoming user {user} to {room}'.format(user=user.id.name,
                                                           room=self.name))
welcomes a user to the roomserver
def run(self):
    """Wait for the subprocess to terminate and return its return code.

    If ``self.timeout`` is set and reached before the process exits, the
    process is killed (and terminated 10 seconds later if still alive)
    and a ``TimedProcTimeoutError`` is raised.
    """
    def receive():
        # Collect output / wait according to how the proc was configured.
        if self.with_communicate:
            self.stdout, self.stderr = self.process.communicate(input=self.stdin)
        elif self.wait:
            self.process.wait()

    if not self.timeout:
        receive()
    else:
        rt = threading.Thread(target=receive)
        rt.start()
        rt.join(self.timeout)
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # portable spelling (available since Python 2.6).
        if rt.is_alive():
            self.process.kill()

            def terminate():
                if rt.is_alive():
                    self.process.terminate()

            # Escalate to terminate() if the kill has not taken effect
            # within 10 seconds.
            threading.Timer(10, terminate).start()
            raise salt.exceptions.TimedProcTimeoutError(
                '{0} : Timed out after {1} seconds'.format(
                    self.command,
                    six.text_type(self.timeout),
                )
            )
    return self.process.returncode
wait for subprocess to terminate and return subprocess' return code. If timeout is reached, throw TimedProcTimeoutError
def remove(self, spec_or_id, multi=True, *args, **kwargs):
    """Backwards-compatible shim for pymongo's removed ``remove`` method.

    Dispatches to ``delete_many`` or ``delete_one`` depending on *multi*;
    any extra arguments are accepted and ignored.
    """
    handler = self.delete_many if multi else self.delete_one
    return handler(spec_or_id)
Backwards compatibility with remove
def archive(self):
    """Prune symlinks, empty files, and empty directories from the work path."""
    # Walk bottom-up so directories emptied by file removal can be dropped.
    for dirpath, dirs, files in os.walk(self.work_path, topdown=False):
        for name in files:
            target = os.path.join(dirpath, name)
            # Symlinks and zero-byte files carry no model output.
            if os.path.islink(target) or os.path.getsize(target) == 0:
                os.remove(target)
        if not os.listdir(dirpath):
            os.rmdir(dirpath)
Store model output to laboratory archive.
def keys_info(cls, fqdn, key):
    """Retrieve information about one key of the given domain."""
    url = '%s/domains/%s/keys/%s' % (cls.api_url, fqdn, key)
    return cls.json_get(url)
Retrieve key information.
def update(sc, filename, asset_id):
    """Update a DNS Asset List from the contents of *filename*.

    The file is assumed to contain one DNS entry per line; its lines are
    uploaded as the asset's DNS list via SecurityCenter.
    """
    with open(filename) as hostfile:
        entries = [line.strip('\n') for line in hostfile.readlines()]
    sc.asset_update(asset_id, dns=entries)
Updates a DNS Asset List with the contents of the filename. The assumed format of the file is 1 entry per line. This function will convert the file contents into an array of entries and then upload that array into SecurityCenter.
def clean_source_index(self):
    """Clean up broken symbolic links in the local source distribution index.

    Older pip-accel versions symlinked entries directly into pip's
    download cache; once the cache was pruned those links broke and
    produced confusing errors, so they are removed here before pip is
    invoked.
    """
    cleanup_timer = Timer()
    cleanup_counter = 0
    for entry in os.listdir(self.config.source_index):
        pathname = os.path.join(self.config.source_index, entry)
        # os.path.exists() follows symlinks, so a link whose target is
        # gone is exactly "islink and not exists".
        if os.path.islink(pathname) and not os.path.exists(pathname):
            # Logger.warn() is a deprecated alias; use warning().
            logger.warning("Cleaning up broken symbolic link: %s", pathname)
            os.unlink(pathname)
            cleanup_counter += 1
    logger.debug("Cleaned up %i broken symbolic links from source index in %s.",
                 cleanup_counter, cleanup_timer)
Cleanup broken symbolic links in the local source distribution index. The purpose of this method requires some context to understand. Let me preface this by stating that I realize I'm probably overcomplicating things, but I like to preserve forward / backward compatibility when possible and I don't feel like dropping everyone's locally cached source distribution archives without a good reason to do so. With that out of the way: - Versions of pip-accel based on pip 1.4.x maintained a local source distribution index based on a directory containing symbolic links pointing directly into pip's download cache. When files were removed from pip's download cache, broken symbolic links remained in pip-accel's local source distribution index directory. This resulted in very confusing error messages. To avoid this :func:`clean_source_index()` cleaned up broken symbolic links whenever pip-accel was about to invoke pip. - More recent versions of pip (6.x) no longer support the same style of download cache that contains source distribution archives that can be re-used directly by pip-accel. To cope with the changes in pip 6.x new versions of pip-accel tell pip to download source distribution archives directly into the local source distribution index directory maintained by pip-accel. - It is very reasonable for users of pip-accel to have multiple versions of pip-accel installed on their system (imagine a dozen Python virtual environments that won't all be updated at the same time; this is the situation I always find myself in :-). These versions of pip-accel will be sharing the same local source distribution index directory. - All of this leads up to the local source distribution index directory containing a mixture of symbolic links and regular files with no obvious way to atomically and gracefully upgrade the local source distribution index directory while avoiding fights between old and new versions of pip-accel :-). 
- I could, of course, switch to storing the new local source distribution index in a differently named directory (avoiding potential conflicts between multiple versions of pip-accel) but then I would have to introduce a new configuration option, otherwise everyone who has configured pip-accel to store its source index in a non-default location could still be bitten by compatibility issues. For now I've decided to keep using the same directory for the local source distribution index and to keep cleaning up broken symbolic links. This enables cooperation between old and new versions of pip-accel and avoids trashing users' local source distribution indexes. The main disadvantage is that pip-accel is still required to clean up broken symbolic links...
def _bnd(self, xloc, dist, length, cache):
    """Boundary function: lower/upper bounds of *dist* evaluated at *xloc*.

    Example:
        >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).range(
        ...     [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]]))
        [[[0. 0. 0.]
          [0. 0. 0.]]
        <BLANKLINE>
         [[2. 2. 2.]
          [2. 2. 2.]]]
    """
    # Flatten samples into a single row for the underlying evaluator.
    lower, upper = evaluation.evaluate_bound(
        dist, xloc.reshape(1, -1))
    # Restore the (length, -1) layout expected by the caller.
    lower = lower.reshape(length, -1)
    upper = upper.reshape(length, -1)
    # Sanity-check that the reshape round-trips to the input layout.
    assert lower.shape == xloc.shape, (lower.shape, xloc.shape)
    assert upper.shape == xloc.shape
    return lower, upper
boundary function. Example: >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).range( ... [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]])) [[[0. 0. 0.] [0. 0. 0.]] <BLANKLINE> [[2. 2. 2.] [2. 2. 2.]]]
def _gen_back(self):
    """Return a sorted list of loaded roster backends.

    Only backends exposing a ``<backend>.targets`` function in the
    loaded rosters are included.
    """
    back = set()
    if self.backends:
        for backend in self.backends:
            fun = '{0}.targets'.format(backend)
            if fun in self.rosters:
                back.add(backend)
    # Always return a sorted list: the original returned the raw set on
    # the early path but a sorted list otherwise, which was inconsistent
    # with the documented "list" contract.
    return sorted(back)
Return a list of loaded roster backends
def data_from_dict(self, data):
    """Populate model parameters from a dictionary of parameters.

    Parameters
    ----------
    data : dict
        Mapping of parameter name to a list of values.

    Returns
    -------
    None
    """
    nvars = []
    for key, val in data.items():
        self.__dict__[key].extend(val)
        # NOTE(review): the length check only kicks in from the third
        # parameter onward (len(nvars) > 1), so the second parameter's
        # length is never compared against the first -- confirm whether
        # ``>= 1`` was intended.
        if len(nvars) > 1 and len(val) != nvars[-1]:
            raise IndexError(
                'Model <{}> parameter <{}> must have the same length'.
                format(self._name, key))
        nvars.append(len(val))
    # Record each element's position in the uid map, keyed by idx.
    for i, idx in zip(range(self.n), self.idx):
        self.uid[idx] = i
Populate model parameters from a dictionary of parameters Parameters ---------- data : dict List of parameter dictionaries Returns ------- None
def makeDirectory(self, full_path, dummy=40841):
    """Make a directory.

    >>> nd.makeDirectory('/test')

    :param full_path: full path of the directory to create; a trailing
        '/' is appended when missing.
    :param dummy: opaque marker value sent with the request.
    :return: ``True`` when the directory was created, ``False`` otherwise.
    """
    # The original used `full_path[-1] is not '/'` -- an identity check
    # against a string literal, which is implementation-dependent (and a
    # SyntaxWarning on Python 3.8+).  Compare content instead; this also
    # no longer crashes on an empty string.
    if not full_path.endswith('/'):
        full_path += '/'
    data = {'dstresource': full_path,
            'userid': self.user_id,
            'useridx': self.useridx,
            'dummy': dummy,
            }
    s, metadata = self.POST('makeDirectory', data)
    return s
Make a directory >>> nd.makeDirectory('/test') :param full_path: The full path to get the directory property. Should end with '/'. :return: ``True`` when the directory was created successfully, ``False`` otherwise
def split_list(list_object=None, granularity=None):
    """Split a list into the specified number of roughly equal parts.

    Returns a list of sublists.  A negative number of parts raises; when
    more parts are requested than there are elements, each element is
    returned in its own single-element list (the maximum possible number
    of parts).
    """
    if granularity < 0:
        raise Exception("negative granularity")
    # Fractional part size so the sublists differ by at most one element.
    mean_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    cursor = 0.0
    while cursor < len(list_object):
        parts.append(list_object[int(cursor):int(cursor + mean_length)])
        cursor += mean_length
    return parts
This function splits a list into a specified number of lists. It returns a list of lists that correspond to these parts. Negative numbers of parts are not accepted and numbers of parts greater than the number of elements in the list result in the maximum possible number of lists being returned.
def __GetRequestField(self, method_description, body_type):
    """Determine the request field name for this method.

    Starts from the body type's default field name and, when that
    collides with an existing method parameter, disambiguates with a
    ``_resource`` suffix and then repeated ``_body`` suffixes until the
    name is free.
    """
    body_field_name = self.__BodyFieldName(body_type)
    if body_field_name in method_description.get('parameters', {}):
        body_field_name = self.__names.FieldName(
            '%s_resource' % body_field_name)
    # Keep appending `_body` until no declared parameter clashes with
    # the chosen field name.
    while body_field_name in method_description.get('parameters', {}):
        body_field_name = self.__names.FieldName(
            '%s_body' % body_field_name)
    return body_field_name
Determine the request field for this method.
def reply(self, ticket_id, text='', cc='', bcc='', content_type='text/plain',
          files=None):
    """Send an email reply (correspond) on the given ticket.

    :param ticket_id: ID of ticket to which the message belongs
    :keyword text: content of the email message
    :keyword content_type: content type of the message (default text/plain)
    :keyword cc: carbon copy just for this reply
    :keyword bcc: blind carbon copy just for this reply
    :keyword files: files to attach as multipart/form-data; list of
        2/3-tuples: (filename, file-like object, [content type])
    :returns: ``True`` if the operation was successful, ``False`` if
        sending failed (status code != 200)
    :raises BadRequest: when the ticket does not exist
    """
    # A mutable default ([]) would be shared across calls; use None as
    # the sentinel and substitute a fresh list per call.
    if files is None:
        files = []
    return self.__correspond(ticket_id, text, 'correspond', cc, bcc,
                             content_type, files)
Sends email message to the contacts in ``Requestors`` field of given ticket with subject as is set in ``Subject`` field. Form of message according to documentation:: id: <ticket-id> Action: correspond Text: the text comment second line starts with the same indentation as first Cc: <...> Bcc: <...> TimeWorked: <...> Attachment: an attachment filename/path :param ticket_id: ID of ticket to which message belongs :keyword text: Content of email message :keyword content_type: Content type of email message, default to text/plain :keyword cc: Carbon copy just for this reply :keyword bcc: Blind carbon copy just for this reply :keyword files: Files to attach as multipart/form-data List of 2/3 tuples: (filename, file-like object, [content type]) :returns: ``True`` Operation was successful ``False`` Sending failed (status code != 200) :raises BadRequest: When ticket does not exist
def create_session_engine(uri=None, cfg=None):
    """Create an sqlalchemy session and engine.

    :param str uri: the database URI to connect to
    :param cfg: configuration object holding the 'db'/'SA_ENGINE_URI' entry
    :return: the session and the engine as a tuple (in that order)
    :raises IOError: when neither *uri* nor *cfg* is given
    """
    if uri is not None:
        engine = sa.create_engine(uri)
    elif cfg is not None:
        engine = sa.create_engine(cfg.get('db', 'SA_ENGINE_URI'))
    else:
        raise IOError("unable to connect to SQL database")
    session = orm.sessionmaker(bind=engine)()
    return session, engine
Create an sqlalchemy session and engine. :param str uri: The database URI to connect to :param cfg: The configuration object with database URI info. :return: The session and the engine as a list (in that order)