code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def read_csv(fname):
    """Read a csv file into a DataAccessObject.

    Each column is loaded as a numpy array; columns whose values parse
    cleanly as int or float are converted to that dtype, everything else
    stays as strings.

    :param fname: filename
    :return: DataAccessObject wrapping {column name: numpy array}
    """
    values = defaultdict(list)
    with open(fname) as f:
        reader = csv.DictReader(f)
        for row in reader:
            for (k, v) in row.items():
                values[k].append(v)
    npvalues = {k: np.array(v) for k, v in values.items()}
    for k in npvalues.keys():
        # np.int / np.float were removed in numpy >= 1.20; the builtin
        # types are exactly what those aliases pointed to.
        for datatype in [int, float]:
            try:
                # Probe a one-element slice first so a failed parse leaves
                # the full column untouched.
                npvalues[k][:1].astype(datatype)
                npvalues[k] = npvalues[k].astype(datatype)
                break
            except (ValueError, TypeError):
                # Column is not numeric under this dtype; try the next one.
                pass
    dao = DataAccessObject(npvalues)
    return dao
Read a csv file into a DataAccessObject :param fname: filename
def tags_newer(self, versions_file, majors):
    """Check this git repo's tags for versions newer than those known.

    @param versions_file: a common.VersionsFile instance to check against.
    @param majors: a list of major branches to check. E.g. ['6', '7']
    @return: the list of newer tags.
    @raise RuntimeError: no newer tags were found.
    """
    highest = versions_file.highest_version_major(majors)
    # Renamed from 'all' so the builtin is not shadowed.
    all_tags = self.tags_get()
    newer = _newer_tags_get(highest, all_tags)
    if not newer:
        raise RuntimeError("No new tags found.")
    return newer
Checks this git repo tags for newer versions. @param versions_file: a common.VersionsFile instance to check against. @param majors: a list of major branches to check. E.g. ['6', '7'] @raise RuntimeError: no newer tags were found. @raise MissingMajorException: A new version from a newer major branch exists, but hasn't been downloaded because it is not in majors.
def open(self, title):
    """Open the first spreadsheet whose title matches *title*.

    :param title: A title of a spreadsheet.
    :returns: a :class:`~gspread.models.Spreadsheet` instance. If several
        spreadsheets share the title, the first one found is opened.
    :raises gspread.SpreadsheetNotFound: if no spreadsheet with the
        specified title is found.
    """
    try:
        match = finditem(
            lambda sheet: sheet['name'] == title,
            self.list_spreadsheet_files(),
        )
    except StopIteration:
        raise SpreadsheetNotFound
    match['title'] = match['name']
    return Spreadsheet(self, match)
Opens a spreadsheet. :param title: A title of a spreadsheet. :type title: str :returns: a :class:`~gspread.models.Spreadsheet` instance. If there's more than one spreadsheet with same title the first one will be opened. :raises gspread.SpreadsheetNotFound: if no spreadsheet with specified `title` is found. >>> c = gspread.authorize(credentials) >>> c.open('My fancy spreadsheet')
def __x_product_aux (property_sets, seen_features):
    """Returns non-conflicting combinations of property sets.

    property_sets is a list of PropertySet instances. seen_features is a set
    of Property instances.

    Returns a tuple of:
    - list of lists of Property instances, such that within each list no two
      Property instances have the same feature, and no Property is for a
      feature in seen_features.
    - set of features we saw in property_sets
    """
    assert is_iterable_typed(property_sets, property_set.PropertySet)
    assert isinstance(seen_features, set)
    if not property_sets:
        return ([], set())

    properties = property_sets[0].all()

    # Features used by the head property set (free features excluded).
    these_features = set()
    for p in property_sets[0].non_free():
        these_features.add(p.feature)

    # Conflict: the head set repeats a feature already fixed by an earlier
    # set, so it cannot participate -- recurse on the tail only.
    if these_features & seen_features:
        (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features)
        return (inner_result, inner_seen | these_features)
    else:
        result = []
        (inner_result, inner_seen) = __x_product_aux(property_sets[1:], seen_features | these_features)
        if inner_result:
            # Prepend the head's properties to every tail combination.
            for inner in inner_result:
                result.append(properties + inner)
        else:
            result.append(properties)

        if inner_seen & these_features:
            # Some tail set also used one of the head's features; the
            # combinations that skip the head entirely are valid too.
            (inner_result2, inner_seen2) = __x_product_aux(property_sets[1:], seen_features)
            result.extend(inner_result2)

        return (result, inner_seen | these_features)
Returns non-conflicting combinations of property sets. property_sets is a list of PropertySet instances. seen_features is a set of Property instances. Returns a tuple of: - list of lists of Property instances, such that within each list, no two Property instance have the same feature, and no Property is for feature in seen_features. - set of features we saw in property_sets
def appname(path=None):
    """Return a useful application name based on the program argument.

    A special case maps 'mod_wsgi' to 'nvn_web' so web applications show
    up under our own name.
    """
    if path is None:
        path = sys.argv[0]
    stem, _ext = os.path.splitext(path)
    base = os.path.basename(stem)
    return 'nvn_web' if base == 'mod_wsgi' else base
Return a useful application name based on the program argument. A special case maps 'mod_wsgi' to a more appropriate name so web applications show up as our own.
def enu2ecef(e1: float, n1: float, u1: float, lat0: float, lon0: float, h0: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]:
    """Convert local ENU coordinates to ECEF.

    The observer at (lat0, lon0, h0) defines the ENU frame; the target's
    east/north/up offsets (e1, n1, u1, meters) are rotated into the ECEF
    frame (via enu2uvw) and added to the observer's ECEF position.

    :param e1: target east ENU coordinate (meters)
    :param n1: target north ENU coordinate (meters)
    :param u1: target up ENU coordinate (meters)
    :param lat0: observer geodetic latitude
    :param lon0: observer geodetic longitude
    :param h0: observer altitude above the geodetic ellipsoid (meters)
    :param ell: reference ellipsoid (optional)
    :param deg: degrees in/out when True, radians otherwise
    :return: target (x, y, z) ECEF coordinates (meters)
    """
    x0, y0, z0 = geodetic2ecef(lat0, lon0, h0, ell, deg=deg)
    du, dv, dw = enu2uvw(e1, n1, u1, lat0, lon0, deg=deg)
    return x0 + du, y0 + dv, z0 + dw
ENU to ECEF Parameters ---------- e1 : float or numpy.ndarray of float target east ENU coordinate (meters) n1 : float or numpy.ndarray of float target north ENU coordinate (meters) u1 : float or numpy.ndarray of float target up ENU coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- x : float or numpy.ndarray of float target x ECEF coordinate (meters) y : float or numpy.ndarray of float target y ECEF coordinate (meters) z : float or numpy.ndarray of float target z ECEF coordinate (meters)
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
    """Obtain possibly base-expanded node name.

    Base-expansion is the transformation of a node name which happens to be
    the name scope of other nodes in the same graph. For example, if two
    nodes called 'a/b' and 'a/b/read' exist in a graph, the name of the
    first node will be base-expanded to 'a/b/(b)'.

    The lookup is delegated to the stored graph object, which per the
    class's design caches results to avoid recomputation.

    Args:
      node_name: Name of the node.
      run_key: The run key to which the node belongs.
      device_name: Name of the device to which the node belongs.

    Raises:
      ValueError: If `run_key` and/or `device_name` do not exist in the
        record.
    """
    # Normalize to str; device names may arrive as bytes.
    device_name = tf.compat.as_str(device_name)
    if run_key not in self._run_key_to_original_graphs:
        raise ValueError('Unknown run_key: %s' % run_key)
    if device_name not in self._run_key_to_original_graphs[run_key]:
        raise ValueError(
            'Unknown device for run key "%s": %s' % (run_key, device_name))
    return self._run_key_to_original_graphs[
        run_key][device_name].maybe_base_expanded_node_name(node_name)
Obtain possibly base-expanded node name. Base-expansion is the transformation of a node name which happens to be the name scope of other nodes in the same graph. For example, if two nodes called 'a/b' and 'a/b/read' exist in a graph, the name of the first node will be base-expanded to 'a/b/(b)'. This method uses caching to avoid unnecessary recomputation. Args: node_name: Name of the node. run_key: The run key to which the node belongs. device_name: Name of the device to which the node belongs. Raises: ValueError: If `run_key` and/or `device_name` do not exist in the record.
def iris(display=False):
    """Return the classic iris data in a nice package.

    :param display: if True, return human-readable target names instead
        of integer labels.
    :return: (DataFrame of features, target labels)
    """
    bunch = sklearn.datasets.load_iris()
    frame = pd.DataFrame(data=bunch.data, columns=bunch.feature_names)
    if not display:
        return frame, bunch.target
    return frame, [bunch.target_names[v] for v in bunch.target]
Return the classic iris data in a nice package.
def GetTopLevel(self, file_object):
    """Returns the deserialized content of a plist as a dictionary object.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Returns:
      dict[str, object]: contents of the plist.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    try:
        return biplist.readPlist(file_object)
    except (biplist.InvalidPlistException,
            biplist.NotBinaryPlistException) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse plist with error: {0!s}'.format(exception))
Returns the deserialized content of a plist as a dictionary object. Args: file_object (dfvfs.FileIO): a file-like object to parse. Returns: dict[str, object]: contents of the plist. Raises: UnableToParseFile: when the file cannot be parsed.
def polyline(self, arr):
    """Draw a set of connected lines.

    :param arr: sequence of points; each consecutive pair is joined with
        a call to self.line.
    """
    for start, end in zip(arr, arr[1:]):
        self.line(start[0], start[1], end[0], end[1])
Draw a set of lines
def application_name(self):
    """The name of the remote application for this relation, or ``None``.

    Equivalent to ``relation.units[0].unit_name.split('/')[0]``; the
    value is computed lazily and cached on the instance.
    """
    if self._application_name is None and self.units:
        first_unit = self.units[0]
        self._application_name = first_unit.unit_name.split('/')[0]
    return self._application_name
The name of the remote application for this relation, or ``None``. This is equivalent to:: relation.units[0].unit_name.split('/')[0]
def visit_FunctionDef(self, node):
    """Track the function currently being visited.

    Resets per-function bookkeeping (conditional flag, naming map) before
    recursing into the body, and clears the current function afterwards
    so nested visits can assert they start clean.
    """
    assert self.current_function is None
    self.current_function = node
    self.in_cond = False
    self.naming = {}
    self.generic_visit(node)
    self.current_function = None
Initialize variable for the current function to add edges from calls. We compute variable to call dependencies and add edges when returns are reach.
def get_version(*args):
    """Extract the version number from a Python module.

    :param args: path components forwarded to :func:`get_contents`.
    :return: the value of the module's ``__version__`` string.
    :raises KeyError: if the module defines no ``__version__``.
    """
    contents = get_contents(*args)
    # Raw string for the regex, per convention, so the quote escapes are
    # unambiguous.
    metadata = dict(re.findall(r'__([a-z]+)__ = [\'"]([^\'"]+)', contents))
    return metadata['version']
Extract the version number from a Python module.
def unfinished_objects(self):
    """Leave only versions of those objects that have some version with
    `_end == None` or with `_end >` the right cutoff (`_rbound`).

    All versions of a qualifying object are kept, not only the open ones.
    """
    # NOTE(review): self appears to be a pandas-like frame; _end_isnull,
    # _end and _oid presumably are column accessors -- confirm against
    # the class definition.
    mask = self._end_isnull
    if self._rbound is not None:
        mask = mask | (self._end > self._rbound)
    # Ids of objects having at least one "unfinished" version...
    oids = set(self[mask]._oid.tolist())
    # ...then keep every version belonging to those objects.
    return self[self._oid.apply(lambda oid: oid in oids)]
Leaves only versions of those objects that have some version with `_end == None` or with `_end > right cutoff`.
def get_subdomain_info(fqn, db_path=None, atlasdb_path=None, zonefiles_dir=None, check_pending=False, include_did=False):
    """Get the state of a subdomain, given its fully-qualified name.

    Return the subdomain record on success.
    Return None if not found, or if subdomain support is disabled.

    :param fqn: fully-qualified subdomain name
    :param db_path: path to the subdomain DB (defaults to configured path)
    :param atlasdb_path: path to the atlas DB (defaults to configured path)
    :param zonefiles_dir: zonefile directory (defaults to configured path)
    :param check_pending: also compute the record's pending flag
    :param include_did: also attach the subdomain's DID info
    """
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        log.warn("Subdomain support is disabled")
        return None

    # Fill unspecified paths from the node configuration.
    if db_path is None:
        db_path = opts['subdomaindb_path']
    if zonefiles_dir is None:
        zonefiles_dir = opts['zonefiles']
    if atlasdb_path is None:
        atlasdb_path = opts['atlasdb_path']

    db = SubdomainDB(db_path, zonefiles_dir)
    try:
        subrec = db.get_subdomain_entry(fqn)
    except SubdomainNotFound:
        log.warn("No such subdomain: {}".format(fqn))
        return None

    if check_pending:
        subrec.pending = db.subdomain_check_pending(subrec, atlasdb_path)

    if include_did:
        subrec.did_info = db.get_subdomain_DID_info(fqn)

    return subrec
Static method for getting the state of a subdomain, given its fully-qualified name. Return the subdomain record on success. Return None if not found.
def enable_caching(self):
    """Enable the cache of this object and of all child values."""
    self.caching_enabled = True
    for child in self.values():
        child.enable_cacher()
Enable the cache of this object.
def meld(*values):
    """Return the repeated value, or the first value if there's only one.

    Convenience function, equivalent to calling getvalue(repeated(x)) to
    get x. Skips over instances of None in values (None is not allowed in
    repeated variables).

    Examples:
        meld("foo", "bar")  # => ListRepetition("foo", "bar")
        meld("foo", None)   # => "foo"
        meld(None)          # => None
    """
    present = [v for v in values if v is not None]
    if not present:
        return None
    combined = repeated(*present)
    return combined if isrepeating(combined) else getvalue(combined)
Return the repeated value, or the first value if there's only one. This is a convenience function, equivalent to calling getvalue(repeated(x)) to get x. This function skips over instances of None in values (None is not allowed in repeated variables). Examples: meld("foo", "bar") # => ListRepetition("foo", "bar") meld("foo", "foo") # => ListRepetition("foo", "foo") meld("foo", None) # => "foo" meld(None) # => None
def wc_wrap(text, length):
    """Wrap text to given length, breaking on whitespace and taking into
    account character width.

    Meant for use on a single line or paragraph. Will destroy spacing
    between words and paragraphs and any indentation.

    Yields the wrapped lines; a single word wider than `length` is
    hard-wrapped mid-word via _wc_hard_wrap.
    """
    line_words = []
    # Display width of the pending line, counting one separator space per
    # word appended so far.
    line_len = 0

    words = re.split(r"\s+", text.strip())
    for word in words:
        word_len = wcswidth(word)

        if line_words and line_len + word_len > length:
            # Flush the pending line before starting a new one.
            line = " ".join(line_words)
            if line_len <= length:
                yield line
            else:
                # Pending line itself is over-wide (single huge word).
                yield from _wc_hard_wrap(line, length)
            line_words = []
            line_len = 0

        line_words.append(word)
        line_len += word_len + 1  # +1 for the separating space

    # Flush whatever remains after the last word.
    if line_words:
        line = " ".join(line_words)
        if line_len <= length:
            yield line
        else:
            yield from _wc_hard_wrap(line, length)
Wrap text to given length, breaking on whitespace and taking into account character width. Meant for use on a single line or paragraph. Will destroy spacing between words and paragraphs and any indentation.
def convert(input_file_name, **kwargs):
    """Convert CSV file to HTML table.

    :param input_file_name: path of the CSV file to read.
    :param kwargs: options; 'delimiter' (default ','), 'quotechar'
        (default '|') and 'no_header' are consumed here, the rest is
        forwarded to the template renderer.
    :return: the rendered, frozen HTML.
    """
    delimiter = kwargs["delimiter"] or ","
    quotechar = kwargs["quotechar"] or "|"

    if six.PY2:
        # Python 2's csv module wants byte strings for these.
        delimiter = delimiter.encode("utf-8")
        quotechar = quotechar.encode("utf-8")

    # NOTE(review): csv.reader is called with an encoding kwarg, so this
    # presumably is the unicodecsv package, not stdlib csv -- confirm the
    # file's imports.
    with open(input_file_name, "rb") as input_file:
        reader = csv.reader(input_file, encoding="utf-8", delimiter=delimiter, quotechar=quotechar)
        csv_headers = []
        if not kwargs.get("no_header"):
            csv_headers = next(reader)
        csv_rows = [row for row in reader if row]

    if not csv_headers and len(csv_rows) > 0:
        # No header row: generate placeholder headers "Column 1..N".
        end = len(csv_rows[0]) + 1
        csv_headers = ["Column {}".format(n) for n in range(1, end)]

    html = render_template(csv_headers, csv_rows, **kwargs)
    return freeze_js(html)
Convert CSV file to HTML table
def is_hex_string(string):
    """Check if the string is only composed of hex characters.

    Accepts str or bytes; bytes are decoded as ASCII first.

    :param string: the value to test.
    :return: True when the value is non-empty and contains only
        characters in [0-9A-Fa-f].
    """
    if isinstance(string, bytes):
        # The original str(bytes) produced "b'...'" on Python 3, whose
        # leading "b" made nearly any bytes value look hex -- decode
        # instead, and reject non-ASCII outright.
        try:
            string = string.decode('ascii')
        except UnicodeDecodeError:
            return False
    # fullmatch: match() only anchors at the start, so e.g. "ff@@"
    # wrongly passed before.
    return re.fullmatch(r'[A-Fa-f0-9]+', string) is not None
Check if the string is only composed of hex characters.
def make_env(env_type, real_env, sim_env_kwargs):
    """Factory function for envs.

    :param env_type: either "real" or "simulated".
    :param real_env: template environment cloned for the "real" case.
    :param sim_env_kwargs: kwargs for the simulated batch env.
    :raises KeyError: for an unknown env_type.
    """
    if env_type == "real":
        return real_env.new_like(
            batch_size=sim_env_kwargs["batch_size"],
            store_rollouts=False,
        )
    if env_type == "simulated":
        return rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames(
            **sim_env_kwargs)
    # The original dispatched through a dict; keep the same failure mode.
    raise KeyError(env_type)
Factory function for envs.
def expr(s):
    """Construct a function operating on a table record.

    Anything enclosed in curly braces (e.g. ``"{foo}"``) becomes a lookup
    on the record (``"rec['foo']"``), and the whole string is compiled to
    ``lambda rec: ...``. So ``"{foo} * {bar}"`` becomes
    ``lambda rec: rec['foo'] * rec['bar']``.

    WARNING: the expression string is passed to :func:`eval`; never call
    this with untrusted input.

    :param s: expression string with field names in curly braces.
    :return: a one-argument function of a record (mapping).
    """
    # Raw string so the brace escapes in the regex are explicit.
    field_pattern = re.compile(r'\{([^}]+)\}')

    def repl(matchobj):
        return "rec['%s']" % matchobj.group(1)

    return eval("lambda rec: " + field_pattern.sub(repl, s))
Construct a function operating on a table record. The expression string is converted into a lambda function by prepending the string with ``'lambda rec: '``, then replacing anything enclosed in curly braces (e.g., ``"{foo}"``) with a lookup on the record (e.g., ``"rec['foo']"``), then finally calling :func:`eval`. So, e.g., the expression string ``"{foo} * {bar}"`` is converted to the function ``lambda rec: rec['foo'] * rec['bar']``
def _api_group_for_type(cls): _groups = { (u"v1beta1", u"Deployment"): u"extensions", (u"v1beta1", u"DeploymentList"): u"extensions", (u"v1beta1", u"ReplicaSet"): u"extensions", (u"v1beta1", u"ReplicaSetList"): u"extensions", } key = ( cls.apiVersion, cls.__name__.rsplit(u".")[-1], ) group = _groups.get(key, None) return group
Determine which Kubernetes API group a particular PClass is likely to belong with. This is basically nonsense. The question being asked is wrong. An abstraction has failed somewhere. Fixing that will get rid of the need for this.
def _get_best_indexes(logits, n_best_size): index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes
Get the n-best logits from a list.
def apply_published_filter(self, queryset, operation, value):
    """Add the appropriate Published filter to a given elasticsearch query.

    :param queryset: The DJES queryset object to be filtered.
    :param operation: The type of filter (before/after).
    :param value: The date or datetime value being applied to the filter.
    :raises ValueError: if operation is not "after" or "before".
    """
    if operation not in ("after", "before"):
        # Give the caller a message instead of a bare ValueError().
        raise ValueError(
            'operation must be "after" or "before", got %r' % (operation,))
    return queryset.filter(Published(**{operation: value}))
Add the appropriate Published filter to a given elasticsearch query. :param queryset: The DJES queryset object to be filtered. :param operation: The type of filter (before/after). :param value: The date or datetime value being applied to the filter.
def get_corpus_path(name: str) -> [str, None]:
    """Get corpus path.

    :param str name: corpus name
    :return: local path of the corpus file, downloading it first if the
        file is missing; None if the corpus is not registered.
    """
    db = TinyDB(corpus_db_path())
    temp = Query()
    # Run the query once (the original issued it twice) and always close
    # the database -- the original leaked the handle on the miss path.
    results = db.search(temp.name == name)
    db.close()
    if not results:
        return None
    path = get_full_data_path(results[0]["file"])
    if not os.path.exists(path):
        download(name)
    return path
Get corpus path :param string name: corpus name
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
    """Prepare the fabric and configure the device for a new firewall.

    Calls the fabric class to prepare the fabric when a firewall is
    created, then calls the device manager to configure the device, and
    updates the database with the final result.

    :param tenant_id: tenant owning the firewall
    :param drvr_name: driver name (unused here; kept for the caller's
        dispatch signature)
    :param fw_dict: firewall attributes; must contain 'fw_id'
    """
    is_fw_virt = self.is_device_virtual()
    ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
                                        fw_constants.RESULT_FW_CREATE_INIT)
    if not ret:
        LOG.error("Prepare Fabric failed")
        return
    else:
        # Fabric ready: record it before attempting device configuration.
        self.update_fw_db_final_result(fw_dict.get('fw_id'), (
            fw_constants.RESULT_FW_CREATE_DONE))
    ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
    if ret:
        # Remember that the driver-side firewall now exists.
        self.fwid_attr[tenant_id].fw_drvr_created(True)
        self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
        LOG.info("FW device create returned success for tenant %s",
                 tenant_id)
    else:
        LOG.error("FW device create returned failure for tenant %s",
                  tenant_id)
Prepares the Fabric and configures the device. This routine calls the fabric class to prepare the fabric when a firewall is created. It also calls the device manager to configure the device. It updates the database with the final result.
def decode(symbol_string, checksum=False, strict=False):
    """Decode an encoded symbol string.

    If checksum is set to True, the string is assumed to have a trailing
    check symbol which will be validated. If the checksum validation
    fails, a ValueError is raised.

    If strict is set to True, a ValueError is raised if the normalization
    step requires changes to the string.

    :return: the decoded integer.
    """
    symbol_string = normalize(symbol_string, strict=strict)
    check_symbol = None
    if checksum:
        check_symbol = symbol_string[-1]
        symbol_string = symbol_string[:-1]

    number = 0
    for symbol in symbol_string:
        number = number * base + decode_symbols[symbol]

    if checksum:
        expected = number % check_base
        if decode_symbols[check_symbol] != expected:
            raise ValueError("invalid check symbol '%s' for string '%s'" %
                             (check_symbol, symbol_string))
    return number
Decode an encoded symbol string. If checksum is set to True, the string is assumed to have a trailing check symbol which will be validated. If the checksum validation fails, a ValueError is raised. If strict is set to True, a ValueError is raised if the normalization step requires changes to the string. The decoded string is returned.
def format_content(content):
    """Convert a Status's HTML contents into lines of plain text.

    Returns a generator yielding lines of content; paragraphs are
    separated by a single empty line.
    """
    for index, paragraph in enumerate(parse_html(content)):
        if index > 0:
            yield ""
        yield from paragraph
Given a Status contents in HTML, converts it into lines of plain text. Returns a generator yielding lines of content.
def get_filters_params(self, params=None):
    """Return a copy of params (default: self.params) minus IGNORED_PARAMS."""
    lookup_params = (params or self.params).copy()
    for ignored in IGNORED_PARAMS:
        lookup_params.pop(ignored, None)
    return lookup_params
Returns all params except IGNORED_PARAMS
def _simulate_installation_of(to_install, package_set):
    """Compute the state of package_set after installing to_install.

    Mutates package_set in place and returns the set of canonical names
    that were (virtually) installed.
    """
    installed = set()
    for requirement in to_install:
        dist = make_abstract_dist(requirement).dist()
        name = canonicalize_name(dist.key)
        package_set[name] = PackageDetails(dist.version, dist.requires())
        installed.add(name)
    return installed
Computes the version of packages after installing to_install.
def grant_local_roles_for(brain_or_object, roles, user=None):
    """Grant local roles on the object for the given (or current) user.

    Code extracted from `IRoleManager.manage_addLocalRoles`.

    :param brain_or_object: Catalog brain or object
    :param roles: The local roles to grant (a string or list of strings)
    :param user: A user ID, user object or None (for the current user)
    :return: the object's local roles after the grant.
    """
    if isinstance(roles, basestring):
        roles = [roles]
    user_id = get_user_id(user)
    target = api.get_object(brain_or_object)
    target.manage_addLocalRoles(user_id, roles)
    return get_local_roles_for(brain_or_object)
Grant local roles for the object Code extracted from `IRoleManager.manage_addLocalRoles` :param brain_or_object: Catalog brain or object :param user: A user ID, user object or None (for the current user) :param roles: The local roles to grant for the current user
def setupTxns(self, key, force: bool = False):
    """Create base genesis transaction files for all known environments.

    :param key: attribute name on Environment selecting the ledger file
        (pool or domain transactions)
    :param force: replace existing transaction files
    :return: self, for chaining
    """
    import data
    dataDir = os.path.dirname(data.__file__)

    allEnvs = {
        "local": Environment("pool_transactions_local",
                             "domain_transactions_local"),
        "test": Environment("pool_transactions_sandbox",
                            "domain_transactions_sandbox"),
        "live": Environment("pool_transactions_live",
                            "domain_transactions_live")
    }

    for env in allEnvs.values():
        fileName = getattr(env, key, None)
        if not fileName:
            continue

        sourceFilePath = os.path.join(dataDir, fileName)
        # Skip environments whose genesis file isn't shipped in 'data'.
        if not os.path.exists(sourceFilePath):
            continue

        destFilePath = os.path.join(
            self.base_dir, genesis_txn_file(fileName))
        # Don't clobber an existing file unless forced.
        if os.path.exists(destFilePath) and not force:
            continue

        copyfile(sourceFilePath, destFilePath)

    return self
Create base transactions :param key: ledger :param force: replace existing transaction files
def get_assembly_mapping_data(self, source_assembly, target_assembly):
    """Get assembly mapping data.

    Parameters
    ----------
    source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
        assembly to remap from
    target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
        assembly to remap to

    Returns
    -------
    dict
        dict of json assembly mapping data if loading was successful,
        else None
    """
    path = self._get_path_assembly_mapping_data(source_assembly, target_assembly)
    return self._load_assembly_mapping_data(path)
Get assembly mapping data. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to Returns ------- dict dict of json assembly mapping data if loading was successful, else None
def lineage(self):
    """Return field paths for all parents.

    Returns:
        Set[:class:`FieldPath`]
    """
    return {FieldPath(*self.parts[:cut]) for cut in range(1, len(self.parts))}
Return field paths for all parents. Returns: Set[:class:`FieldPath`]
def operation(self, other, function, **kwargs):
    """Calculate an "elementwise" operation between this TimeSeries and
    another one, i.e. operation(t) = function(self(t), other(t)), or
    between this TimeSeries and a constant:
    operation(t) = function(self(t), other).

    With another TimeSeries, the measurement times in the result are the
    union of both inputs' time sets; with a constant, the measurement
    times are unchanged.

    :param other: TimeSeries or constant operand.
    :param function: binary function applied pointwise.
    :param kwargs: forwarded to the result's TimeSeries constructor.
    """
    result = TimeSeries(**kwargs)
    if not isinstance(other, TimeSeries):
        for time, value in self:
            result[time] = function(value, other)
        return result
    # Cover times from both series; overlapping times are written twice
    # with identical values.
    for time, value in self:
        result[time] = function(value, other[time])
    for time, value in other:
        result[time] = function(self[time], value)
    return result
Calculate "elementwise" operation either between this TimeSeries and another one, i.e. operation(t) = function(self(t), other(t)) or between this timeseries and a constant: operation(t) = function(self(t), other) If it's another time series, the measurement times in the resulting TimeSeries will be the union of the sets of measurement times of the input time series. If it's a constant, the measurement times will not change.
def update_preferences_by_category(self, category, communication_channel_id, notification_preferences_frequency):
    """Update preferences by category.

    Change the preferences for multiple notifications based on the
    category for a single communication channel.
    """
    path = {
        "communication_channel_id": communication_channel_id,
        "category": category,
    }
    data = {
        "notification_preferences[frequency]": notification_preferences_frequency,
    }
    params = {}
    self.logger.debug("PUT /api/v1/users/self/communication_channels/{communication_channel_id}/notification_preference_categories/{category} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/self/communication_channels/{communication_channel_id}/notification_preference_categories/{category}".format(**path), data=data, params=params, no_data=True)
Update preferences by category. Change the preferences for multiple notifications based on the category for a single communication channel
def get_layout():
    """Specify a hierarchy of our templates.

    Returns the root TemplateDir; the tica -> cluster -> msm chain is
    built once and shared by both the 'landmarks' and 'dihedrals'
    branches.
    """
    # Shared sub-tree: tica, then clustering, then MSM construction.
    tica_msm = TemplateDir(
        'tica',
        [
            'tica/tica.py',
            'tica/tica-plot.py',
            'tica/tica-sample-coordinate.py',
            'tica/tica-sample-coordinate-plot.py',
        ],
        [
            TemplateDir(
                'cluster',
                [
                    'cluster/cluster.py',
                    'cluster/cluster-plot.py',
                    'cluster/sample-clusters.py',
                    'cluster/sample-clusters-plot.py',
                ],
                [
                    TemplateDir(
                        'msm',
                        [
                            'msm/timescales.py',
                            'msm/timescales-plot.py',
                            'msm/microstate.py',
                            'msm/microstate-plot.py',
                            'msm/microstate-traj.py',
                        ],
                        [],
                    )
                ]
            )
        ]
    )
    # Root: setup scripts, then the analysis tree with three featurization
    # branches (rmsd has no downstream msm chain).
    layout = TemplateDir(
        '',
        [
            '0-test-install.py',
            '1-get-example-data.py',
            'README.md',
        ],
        [
            TemplateDir(
                'analysis',
                [
                    'analysis/gather-metadata.py',
                    'analysis/gather-metadata-plot.py',
                ],
                [
                    TemplateDir(
                        'rmsd',
                        [
                            'rmsd/rmsd.py',
                            'rmsd/rmsd-plot.py',
                        ],
                        [],
                    ),
                    TemplateDir(
                        'landmarks',
                        [
                            'landmarks/find-landmarks.py',
                            'landmarks/featurize.py',
                            'landmarks/featurize-plot.py',
                        ],
                        [tica_msm],
                    ),
                    TemplateDir(
                        'dihedrals',
                        [
                            'dihedrals/featurize.py',
                            'dihedrals/featurize-plot.py',
                        ],
                        [tica_msm],
                    )
                ]
            )
        ]
    )
    return layout
Specify a hierarchy of our templates.
def _build_dependent_model_list(self, obj_schema):
    """Build the list of models the given object schema references.

    Handles arrays (via 'items'), $ref links (resolved through
    self._models()) and nested object properties; duplicates are removed,
    so the result order is unspecified.
    """
    found = []
    if obj_schema:
        obj_schema['type'] = obj_schema.get('type', 'object')
        if obj_schema['type'] == 'array':
            found.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
        elif obj_schema.get('$ref'):
            ref_name = obj_schema['$ref'].split("/")[-1]
            found.extend(self._build_dependent_model_list(self._models().get(ref_name)))
            found.append(ref_name)
        else:
            for prop_schema in (obj_schema.get('properties') or {}).values():
                found.extend(self._build_dependent_model_list(prop_schema))
    return list(set(found))
Helper function to build the list of models the given object schema is referencing.
def save_loop(self):
    """Periodically save the state if it has changed.

    Runs until self.running goes false, sleeping save_interval between
    checks. Change detection compares hash(repr(self.hosts)) snapshots,
    which is cheap but could miss a change on a hash collision.
    """
    last_hash = hash(repr(self.hosts))
    while self.running:
        # Cooperative sleep (eventlet): other greenlets run meanwhile.
        eventlet.sleep(self.save_interval)
        next_hash = hash(repr(self.hosts))
        if next_hash != last_hash:
            self.save()
            last_hash = next_hash
Saves the state if it has changed.
async def subscribe(self, topic):
    """Subscribe the socket to the specified topic.

    :param topic: The topic to subscribe to.
    :raises AssertionError: if this socket type cannot subscribe (only
        SUB and XSUB sockets may).
    """
    if self.socket_type not in {SUB, XSUB}:
        raise AssertionError(
            "A %s socket cannot subscribe." % self.socket_type.decode(),
        )
    # Remember the subscription so it can be applied to future peers.
    self._subscriptions.append(topic)
    # Tell every currently-connected peer about the subscription.
    tasks = [
        asyncio.ensure_future(
            peer.connection.local_subscribe(topic),
            loop=self.loop,
        )
        for peer in self._peers
        if peer.connection
    ]
    if tasks:
        try:
            await asyncio.wait(tasks, loop=self.loop)
        finally:
            # Don't leak pending tasks if the wait itself is cancelled.
            for task in tasks:
                task.cancel()
Subscribe the socket to the specified topic. :param topic: The topic to subscribe to.
def host_info_getter(func, name=None):
    """Register *func* as a host-info gatherer.

    Adds the decorated function to the global host_info_gatherers
    dictionary under *name* (defaulting to the function's own name), and
    returns the function unchanged so this works as a decorator.

    Parameters
    ----------
    func : callable
        A function that can be called without arguments and returns some
        json-serializable information.
    name : str, optional
        The name of the corresponding entry in host_info. Defaults to
        the name of the function.

    Returns
    -------
    The function itself.
    """
    key = name or func.__name__
    host_info_gatherers[key] = func
    return func
The decorated function is added to the process of collecting the host_info. This just adds the decorated function to the global ``sacred.host_info.host_info_gatherers`` dictionary. The functions from that dictionary are used when collecting the host info using :py:func:`~sacred.host_info.get_host_info`. Parameters ---------- func : callable A function that can be called without arguments and returns some json-serializable information. name : str, optional The name of the corresponding entry in host_info. Defaults to the name of the function. Returns ------- The function itself.
def install_python(name, version=None, install_args=None, override_args=False):
    """Instructs Chocolatey to install a package via Python's easy_install.

    name
        The name of the package to be installed. Only accepts a single
        argument.

    version
        Install a specific version of the package. Defaults to the latest
        version available.

    install_args
        A list of install arguments you want to pass to the installation
        process, i.e. product key or feature list.

    override_args
        Set to True to override the original install arguments (for the
        native installer) in the package and use your own. When False,
        install_args will be appended to the end of the default arguments.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_python <package name>
        salt '*' chocolatey.install_python <package name> version=<package version>
        salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
    """
    # Thin wrapper: delegate to install() with the 'python' source.
    return install(name, version=version, source='python', install_args=install_args, override_args=override_args)
Instructs Chocolatey to install a package via Python's easy_install. name The name of the package to be installed. Only accepts a single argument. version Install a specific version of the package. Defaults to latest version available. install_args A list of install arguments you want to pass to the installation process i.e product key or feature list override_args Set to true if you want to override the original install arguments (for the native installer) in the package and use your own. When this is set to False install_args will be appended to the end of the default arguments CLI Example: .. code-block:: bash salt '*' chocolatey.install_python <package name> salt '*' chocolatey.install_python <package name> version=<package version> salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
def is_user_profile_valid(user_profile):
    """Determine if provided user profile is valid or not.

    A valid profile is a non-empty dict (exactly dict, not a subclass)
    containing the user ID key and an experiment bucket map; every
    decision in the bucket map must itself be a dict holding a variation
    ID.

    Args:
        user_profile: User's profile which needs to be validated.

    Returns:
        Boolean depending upon whether profile is valid or not.
    """
    # The exact-type checks mirror the original contract: dict subclasses
    # are rejected on purpose.
    if not user_profile or type(user_profile) is not dict:
        return False
    if UserProfile.USER_ID_KEY not in user_profile:
        return False
    if UserProfile.EXPERIMENT_BUCKET_MAP_KEY not in user_profile:
        return False
    bucket_map = user_profile.get(UserProfile.EXPERIMENT_BUCKET_MAP_KEY)
    if type(bucket_map) is not dict:
        return False
    return all(
        type(decision) is dict and UserProfile.VARIATION_ID_KEY in decision
        for decision in bucket_map.values()
    )
Determine if provided user profile is valid or not. Args: user_profile: User's profile which needs to be validated. Returns: Boolean depending upon whether profile is valid or not.
def convert_entrez_to_uniprot(self, entrez):
    """Convert an Entrez gene Id to a UniProt Id.

    :param entrez: Entrez gene id (string or int).
    :return: the first UniProt accession found for the gene.
    :raises requests.HTTPError: if the UniProt query fails.
    """
    server = "http://www.uniprot.org/uniprot/?query=%22GENEID+{0}%22&format=xml".format(entrez)
    r = requests.get(server, headers={"Content-Type": "text/xml"})
    if not r.ok:
        # raise_for_status() always raises when r.ok is False, so the
        # sys.exit() that used to follow it was unreachable dead code.
        r.raise_for_status()
    info = xmltodict.parse(r.text)
    try:
        # Single <entry>: accessions sit directly under it.
        return info['uniprot']['entry']['accession'][0]
    except TypeError:
        # Multiple <entry> elements parse as a list; take the first.
        return info['uniprot']['entry'][0]['accession'][0]
Convert Entrez Id to Uniprot Id
def stem_word(self, word):
    """Stem a word to its common stem form.

    Plural words are routed to the plural stemmer, everything else to
    the singular stemmer.
    """
    stemmer = self.stem_plural_word if self.is_plural(word) else self.stem_singular_word
    return stemmer(word)
Stem a word to its common stem form.
def get_all_attribute_value(
        self, tag_name, attribute, format_value=True, **attribute_filter
):
    """Yield every value of *attribute* on tags matching *tag_name*.

    Falls back to the namespaced form of the attribute when the plain
    lookup yields nothing. Values are passed through self._format_value
    (packagename substitution) unless format_value is False.

    :param str tag_name: specify the tag name
    :param str attribute: specify the attribute
    :param bool format_value: format the value before yielding it
    """
    for tag in self.find_tags(tag_name, **attribute_filter):
        raw = tag.get(attribute) or tag.get(self._ns(attribute))
        if raw is not None:
            yield self._format_value(raw) if format_value else raw
Yields all the attribute values in xml files which match with the tag name and the specific attribute :param str tag_name: specify the tag name :param str attribute: specify the attribute :param bool format_value: specify if the value needs to be formatted with packagename
def _at_least_x_are_true(a, b, x):
    """At least `x` of `a` and `b` `Tensors` are true.

    Returns a boolean scalar tensor: whether the number of positions
    where `a` equals `b` is >= `x`.
    """
    match = tf.equal(a, b)
    # Cast bool -> int32 so the matches can be counted with reduce_sum.
    match = tf.cast(match, tf.int32)
    return tf.greater_equal(tf.reduce_sum(match), x)
At least `x` of `a` and `b` `Tensors` are true.
def _apply_post_render_hooks(self, data, obj, fmt): hooks = self.post_render_hooks.get(fmt,[]) for hook in hooks: try: data = hook(data, obj) except Exception as e: self.param.warning("The post_render_hook %r could not " "be applied:\n\n %s" % (hook, e)) return data
Apply the post-render hooks to the data.
def update(self, **kwargs):
    """Update the drawing.

    :param kwargs: Drawing properties
    """
    svg_changed = False
    for prop in kwargs:
        if prop == "drawing_id":
            # The id is immutable; ignore attempts to change it.
            pass
        elif getattr(self, prop) != kwargs[prop]:
            if prop == "svg":
                svg_changed = True
            setattr(self, prop, kwargs[prop])
    data = self.__json__()
    if not svg_changed:
        # Don't broadcast the (potentially large) SVG when unchanged.
        del data["svg"]
    self._project.controller.notification.emit("drawing.updated", data)
    self._project.dump()
Update the drawing :param kwargs: Drawing properties
def _auto(direction, name, value, source='auto', convert_to_human=True):
    """Internal magic for from_auto and to_auto.

    Looks up the property's declared type ('str' when unknown) and
    dispatches to the matching from_*/to_* conversion helper found in
    this module's globals.
    """
    # Only 'to' and 'from' are meaningful directions; pass through else.
    if direction not in ['to', 'from']:
        return value

    # Property metadata: zpool by default, zfs when requested, or both
    # merged (zfs winning) for 'auto'.
    props = property_data_zpool()
    if source == 'zfs':
        props = property_data_zfs()
    elif source == 'auto':
        props.update(property_data_zfs())

    # e.g. 'to_size' or 'from_bool', resolved via module globals.
    value_type = props[name]['type'] if name in props else 'str'
    if value_type == 'size' and direction == 'to':
        # to_size additionally takes the human-readable flag.
        return globals()['{}_{}'.format(direction, value_type)](value, convert_to_human)

    return globals()['{}_{}'.format(direction, value_type)](value)
Internal magic for from_auto and to_auto
def _list_subnets_by_identifier(self, identifier): identifier = identifier.split('/', 1)[0] results = self.list_subnets(identifier=identifier, mask='id') return [result['id'] for result in results]
Returns a list of IDs of the subnet matching the identifier. :param string identifier: The identifier to look up :returns: List of matching IDs
def gfonts_repo_structure(fonts):
    """Whether the family at the given font path follows the files and
    directory structure typical of a font project hosted on the Google
    Fonts GitHub repo.

    True when the first font lives under an 'ufl', 'ofl' or 'apache'
    license directory two levels up.
    """
    from fontbakery.utils import get_absolute_path

    abspath = get_absolute_path(fonts[0])
    license_dir = abspath.split(os.path.sep)[-3]
    return license_dir in ("ufl", "ofl", "apache")
The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub ?
def __Post(self, path, request, body, headers):
    """Azure Cosmos 'POST' http request.

    :params str path:
    :params request: request descriptor forwarded to the synchronized
        request layer.
    :params (str, unicode, dict) body:
    :params dict headers:

    :return:
        Tuple of (result, headers).
    :rtype:
        tuple of (dict, dict)
    """
    # Delegate to the synchronized request helper, passing along the
    # endpoint manager, connection policy and shared requests session.
    return synchronized_request.SynchronizedRequest(self, request, self._global_endpoint_manager, self.connection_policy, self._requests_session, 'POST', path, body, query_params=None, headers=headers)
Azure Cosmos 'POST' http request. :params str url: :params str path: :params (str, unicode, dict) body: :params dict headers: :return: Tuple of (result, headers). :rtype: tuple of (dict, dict)
def findspan(self, *words):
    """Return the span element which spans over exactly the specified
    words or morphemes.

    See also: :meth:`Word.findspans`

    :raises NoSuchAnnotation: if no such span exists.
    """
    target = tuple(words)
    for candidate in self.select(AbstractSpanAnnotation, None, True):
        if tuple(candidate.wrefs()) == target:
            return candidate
    raise NoSuchAnnotation
Returns the span element which spans over the specified words or morphemes. See also: :meth:`Word.findspans`
def new_result(self, job, update_model=True):
    """Register a finished run.

    Every time a run has finished, this function should be called to
    register it with the result logger. If overwritten, make sure to
    call this method from the base class to ensure proper logging.

    Parameters
    ----------
    job: instance of hpbandster.distributed.dispatcher.Job
        contains all necessary information about the job
    update_model: boolean
        determines whether a model inside the config_generator should be
        updated
    """
    # PEP 8 idiom: 'is not None' instead of 'not ... is None'.
    if job.exception is not None:
        self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception))
registers finished runs Every time a run has finished, this function should be called to register it with the result logger. If overwritten, make sure to call this method from the base class to ensure proper logging. Parameters ---------- job: instance of hpbandster.distributed.dispatcher.Job contains all necessary information about the job update_model: boolean determines whether a model inside the config_generator should be updated
def __load_pst(self):
    """Private method to set the pst attribute.

    Accepts either an existing Pst instance or something Pst() can load
    (e.g. a control-file path). Returns the loaded Pst, or None when no
    pst argument was supplied.
    """
    if self.pst_arg is None:
        return None
    if isinstance(self.pst_arg, Pst):
        # Already a Pst instance; adopt it directly.
        self.__pst = self.pst_arg
        return self.pst
    else:
        try:
            # log() is called twice to mark start and end of the load.
            self.log("loading pst: " + str(self.pst_arg))
            self.__pst = Pst(self.pst_arg)
            self.log("loading pst: " + str(self.pst_arg))
            return self.pst
        except Exception as e:
            # Wrap the failure with the offending argument for context.
            raise Exception("linear_analysis.__load_pst(): error loading"+\
                " pest control from argument: " + str(self.pst_arg) + '\n->' + str(e))
private method set the pst attribute
def request_output(self, table, outtype):
    """Request the output for a given table.

    ## Arguments

    * `table` (str): The name of the table to export.
    * `outtype` (str): The type of output. Must be one of:
          CSV - Comma Separated Values
          DataSet - XML DataSet
          FITS - Flexible Image Transport System (FITS Binary)
          VOTable - XML Virtual Observatory VOTABLE

    Returns the numeric job id.
    """
    valid_types = ["CSV", "DataSet", "FITS", "VOTable"]
    assert outtype in valid_types
    response = self._send_request("SubmitExtractJob",
                                  params={"tableName": table, "type": outtype})
    return int(self._parse_single(response.text, "long"))
Request the output for a given table. ## Arguments * `table` (str): The name of the table to export. * `outtype` (str): The type of output. Must be one of: CSV - Comma Separated Values DataSet - XML DataSet FITS - Flexible Image Transport System (FITS Binary) VOTable - XML Virtual Observatory VOTABLE
def network_size(value, options=None, version=None):
    """Get the size of a network (or of each network in a list).

    Returns None when nothing passes the address filter; a list input
    yields a list of sizes, a scalar input a single size.
    """
    filtered = _filter_ipaddr(value, options=options, version=version)
    if not filtered:
        return
    if isinstance(value, (list, tuple, types.GeneratorType)):
        return [_network_size(addr) for addr in filtered]
    return _network_size(filtered[0])
Get the size of a network.
def check_aggregate(self, variable, components=None, exclude_on_fail=False,
                    multiplier=1, **kwargs):
    """Check whether a timeseries matches the aggregation of its components.

    Parameters
    ----------
    variable: str
        variable to be checked for matching aggregation of sub-categories
    components: list of str, default None
        list of variables, defaults to all sub-categories of `variable`
    exclude_on_fail: boolean, default False
        flag scenarios failing validation as `exclude: True`
    multiplier: number, default 1
        factor when comparing variable and sum of components
    kwargs: passed to `np.isclose()`
    """
    # Aggregate the components; nothing to check when there are none.
    df_components = self.aggregate(variable, components)
    if df_components is None:
        return

    # Align the variable's own timeseries with the component aggregate.
    rows = self._apply_filters(variable=variable)
    df_variable, df_components = (
        _aggregate(self.data[rows], 'variable').align(df_components)
    )

    # Rows where the variable deviates from multiplier * sum(components).
    diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
                                   **kwargs)]

    if len(diff):
        msg = '`{}` - {} of {} rows are not aggregates of components'
        logger().info(msg.format(variable, len(diff), len(df_variable)))

        if exclude_on_fail:
            self._exclude_on_fail(diff.index.droplevel([2, 3, 4]))

        return IamDataFrame(diff, variable=variable).timeseries()
Check whether a timeseries matches the aggregation of its components Parameters ---------- variable: str variable to be checked for matching aggregation of sub-categories components: list of str, default None list of variables, defaults to all sub-categories of `variable` exclude_on_fail: boolean, default False flag scenarios failing validation as `exclude: True` multiplier: number, default 1 factor when comparing variable and sum of components kwargs: passed to `np.isclose()`
def alter_and_get(self, function):
    """Apply *function* to the stored reference server-side and return the new value.

    :param function: serializable object with an ``IFunction`` counterpart
        registered on the server.
    :return: the result of the applied function.
    """
    check_not_none(function, "function can't be None")
    payload = self._to_data(function)
    return self._encode_invoke(atomic_reference_alter_and_get_codec,
                               function=payload)
Alters the currently stored reference by applying a function on it and gets the result. :param function: (Function), A stateful serializable object which represents the Function defined on server side. This object must have a serializable Function counter part registered on server side with the actual ``org.hazelcast.core.IFunction`` implementation. :return: (object), the new value, the result of the applied function.
def v4_int_to_packed(address):
    """Return the packed binary representation of an integer IPv4 address.

    :raises ValueError: if the integer does not fit in 32 bits.
    """
    if address > _BaseV4._ALL_ONES:
        raise ValueError('Address too large for IPv4')
    packed = struct.pack('!I', address)
    return Bytes(packed)
The binary representation of this address. Args: address: An integer representation of an IPv4 IP address. Returns: The binary representation of this address. Raises: ValueError: If the integer is too large to be an IPv4 IP address.
def submit(self, workflow_uuid='', experiment='', image='', cmd='',
           prettified_cmd='', workflow_workspace='', job_name='',
           cvmfs_mounts='false'):
    """Submit a job spec to the RJC API and return the response dict.

    :raises HTTPBadRequest: on a 400 response from the API.
    :raises HTTPInternalServerError: on a 500 response from the API.
    """
    spec = {
        'experiment': experiment,
        'docker_img': image,
        'cmd': cmd,
        'prettified_cmd': prettified_cmd,
        'env_vars': {},
        'workflow_workspace': workflow_workspace,
        'job_name': job_name,
        'cvmfs_mounts': cvmfs_mounts,
        'workflow_uuid': workflow_uuid,
    }
    response, http_response = self._client.jobs.create_job(job=spec).result()
    status = http_response.status_code
    if status == 400:
        raise HTTPBadRequest('Bad request to create a job. Error: {}'.
                             format(http_response.data))
    if status == 500:
        raise HTTPInternalServerError('Internal Server Error. Error: {}'.
                                      format(http_response.data))
    return response
Submit a job to RJC API. :param workflow_uuid: UUID of the workflow the job belongs to. :param job_name: Name of the job. :param experiment: Experiment the job belongs to. :param image: Identifier of the Docker image which will run the job. :param cmd: String which represents the command to execute. It can be modified by the workflow engine i.e. prepending ``cd /some/dir/``. :prettified_cmd: Original command submitted by the user. :workflow_workspace: Path to the workspace of the workflow. :cvmfs_mounts: String with CVMFS volumes to mount in job pods. :return: Returns a dict with the ``job_id``.
def drawdown_recov(self, return_int=False):
    """Length of drawdown recovery (trough to recovery date).

    :param return_int: when True return the number of days as an int,
        otherwise return the Timedelta itself.
    """
    delta = self.recov_date() - self.drawdown_end()
    return delta.days if return_int else delta
Length of drawdown recovery in days. This is the duration from trough to recovery date. Parameters ---------- return_int : bool, default False If True, return the number of days as an int. If False, return a Pandas Timedelta object. Returns ------- int or pandas._libs.tslib.Timedelta
def start_response(self, status=200, headers=None, clearheaders=True,
                   disabletransferencoding=False):
    """Start to send response.

    :param status: HTTP status code to send.
    :param headers: list of header tuples to queue (defaults to none).
    :param clearheaders: when True, replace any previously queued headers;
        otherwise append to them.
    :param disabletransferencoding: when True, disable deflate transfer
        encoding for this response.
    :raises HttpProtocolException: if headers were already sent.
    """
    # `headers=None` avoids the mutable-default-argument pitfall of the
    # original `headers=[]`.
    if headers is None:
        headers = []
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    self.status = status
    self.disabledeflate = disabletransferencoding
    if clearheaders:
        # Copy so later mutation of the caller's list does not leak in.
        self.sent_headers = headers[:]
    else:
        self.sent_headers.extend(headers)
Start to send response
def _to_bstr(l):
    """Convert *l* to a byte string (ASCII, backslash-escaping non-ASCII)."""
    if isinstance(l, bytes):
        return l
    if not isinstance(l, str):
        l = str(l)
    return l.encode('ascii', 'backslashreplace')
Convert to byte string.
def setCurrentIndex(self, y, x):
    """Move the current table selection to cell (y, x), clearing the old one."""
    model_index = self.dataTable.model().index(y, x)
    selection_model = self.dataTable.selectionModel()
    selection_model.setCurrentIndex(model_index,
                                    QItemSelectionModel.ClearAndSelect)
Set current selection.
def send_to_tsdb(self, realm, host, service, metrics, ts, path):
    """Store performance data in the internal cache, flushing when full.

    :param realm: concerned realm (string, or list joined with '.')
    :param host: concerned host
    :param service: concerned service
    :param metrics: list of (name, value, _) metric tuples
    :param ts: timestamp (defaults to now when None)
    :param path: full path (eg. Graphite) for the received metrics, or None
    """
    if ts is None:
        ts = int(time.time())
    data = {
        "measurement": service,
        "tags": {
            "host": host,
            "service": service,
            "realm": '.'.join(realm) if isinstance(realm, list) else realm,
        },
        "time": ts,
        "fields": {},
    }
    # Only tag with the path when one was provided.  The original code both
    # set it unconditionally (possibly to None) and then redundantly re-set
    # it in this branch.
    if path is not None:
        data['tags']["path"] = path
    for metric, value, _ in metrics:
        data['fields'][metric] = value
    logger.debug("Data: %s", data)
    self.my_metrics.append(data)
    # Flush once enough metrics have accumulated.
    if self.metrics_count >= self.metrics_flush_count:
        self.flush()
Send performance data to time series database Indeed this function stores metrics in the internal cache and checks if the flushing is necessary and then flushes. :param realm: concerned realm :type: string :param host: concerned host :type: string :param service: concerned service :type: string :param metrics: list of metrics couple (name, value) :type: list :param ts: timestamp :type: int :param path: full path (eg. Graphite) for the received metrics :type: string
def translate(self, from_lang=None, to="de"):
    """Translate the word via the configured translator service.

    When *from_lang* is not given, the source language is auto-detected.
    """
    source = from_lang if from_lang is not None else self.translator.detect(self.string)
    return self.translator.translate(self.string, from_lang=source, to_lang=to)
Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``)
def clear(self):
    """Remove every work item from the session, associated results included."""
    with self._conn:
        for table in ('results', 'work_items'):
            self._conn.execute('DELETE FROM {}'.format(table))
Clear all work items from the session. This removes any associated results as well.
def as_pyplot_figure(self, label=1, **kwargs):
    """Return the explanation as a pyplot horizontal bar-chart figure.

    Requires matplotlib.  *label* selects the class for classification
    explanations and is ignored for regression; extra kwargs are passed
    through to ``as_list``.
    """
    import matplotlib.pyplot as plt
    explanation = self.as_list(label=label, **kwargs)
    fig = plt.figure()
    # Reverse so the most important feature ends up at the top of the chart.
    names = [name for name, _ in explanation][::-1]
    weights = [weight for _, weight in explanation][::-1]
    colors = ['green' if w > 0 else 'red' for w in weights]
    positions = np.arange(len(explanation)) + .5
    plt.barh(positions, weights, align='center', color=colors)
    plt.yticks(positions, names)
    if self.mode == "classification":
        title = 'Local explanation for class %s' % self.class_names[label]
    else:
        title = 'Local explanation'
    plt.title(title)
    return fig
Returns the explanation as a pyplot figure. Will throw an error if you don't have matplotlib installed Args: label: desired label. If you ask for a label for which an explanation wasn't computed, will throw an exception. Will be ignored for regression explanations. kwargs: keyword arguments, passed to domain_mapper Returns: pyplot figure (barchart).
def on_init(app):
    """Run sphinx-apidoc and swg2rst after Sphinx initialization."""
    docs_path = os.path.abspath(os.path.dirname(__file__))
    root_path = os.path.abspath(os.path.join(docs_path, '..'))
    apidoc_cmd = 'sphinx-apidoc'
    swg2rst_cmd = 'swg2rst'
    # Inside a virtualenv, resolve the tools from its bin directory.
    if hasattr(sys, 'real_prefix'):
        bin_path = os.path.abspath(os.path.join(sys.prefix, 'bin'))
        apidoc_cmd = os.path.join(bin_path, apidoc_cmd)
        swg2rst_cmd = os.path.join(bin_path, swg2rst_cmd)
    check_call([
        apidoc_cmd, '-o', docs_path,
        os.path.join(root_path, 'user_tasks'),
        os.path.join(root_path, 'user_tasks/migrations'),
    ])
    json_path = os.path.join(docs_path, 'swagger.json')
    rst_path = os.path.join(docs_path, 'rest_api.rst')
    check_call([swg2rst_cmd, json_path, '-f', 'rst', '-o', rst_path])
Run sphinx-apidoc and swg2rst after Sphinx initialization. Read the Docs won't run tox or custom shell commands, so we need this to avoid checking in the generated reStructuredText files.
def toProtocolElement(self):
    """Return this FeatureSet as the corresponding protocol element."""
    feature_set = protocol.FeatureSet()
    feature_set.id = self.getId()
    feature_set.dataset_id = self.getParentContainer().getId()
    feature_set.reference_set_id = pb.string(self._referenceSet.getId())
    feature_set.name = self._name
    feature_set.source_uri = self._sourceUri
    for key, value in self.getAttributes().items():
        feature_set.attributes.attr[key].values.extend(
            protocol.encodeValue(value))
    return feature_set
Returns the representation of this FeatureSet as the corresponding ProtocolElement.
def hide_zeroes(self):
    """Hide every subset label whose text is exactly '0'."""
    for label in self.subset_labels:
        if label is None:
            continue
        if label.get_text() == '0':
            label.set_visible(False)
Sometimes it makes sense to hide the labels for subsets whose size is zero. This utility method does this.
def iteritems(self):
    """Yield (key, value) pairs of __dict__, skipping excluded keys."""
    excluded = self._printable_exclude
    for key, value in six.iteritems(self.__dict__):
        if key not in excluded:
            yield (key, value)
Iterate over the printable (key, value) pairs of this object's __dict__. Overridden during the move to Python 3 because the method hadn't been exercised before.
def print_examples(self, full=False):
    """Print usage examples and return the help exit code.

    :param bool full: print all examples instead of only featured ones
    """
    lines = []
    number = 1
    for key in sorted(self.DEMOS):
        demo = self.DEMOS[key]
        if not (full or demo["show"]):
            continue
        lines.append(u"Example %d (%s)" % (number, demo[u"description"]))
        lines.append(u"  $ %s %s" % (self.invoke, key))
        lines.append(u"")
        number += 1
    self.print_generic(u"\n" + u"\n".join(lines) + u"\n")
    return self.HELP_EXIT_CODE
Print the examples and exit. :param bool full: if ``True``, print all examples; otherwise, print only selected ones
def on_btn_metadata(self, event):
    """Initiate the series of windows to add metadata to the contribution."""
    # Bail out unless a measurement file exists and files are combined.
    if not self.check_for_meas_file():
        return
    if not self.check_for_uncombined_files():
        return
    # Pick the model builder matching the MagIC data model in use.
    if self.data_model_num == 2:
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder(self.WD, self, self.er_magic)
    elif self.data_model_num == 3:
        wait = wx.BusyInfo('Compiling required data, please wait...')
        wx.SafeYield()
        self.ErMagic_frame = ErMagicBuilder.MagIC_model_builder3(self.WD, self, self.contribution)
    self.ErMagic_frame.Show()
    self.ErMagic_frame.Center()
    # Shrink to 70% of the display size.
    size = wx.DisplaySize()
    size = (size[0] - 0.3 * size[0], size[1] - 0.3 * size[1])
    self.ErMagic_frame.Raise()
    # NOTE(review): `wait` is unbound when data_model_num is neither 2 nor
    # 3, so this `del` would raise — presumably only 2 and 3 occur; confirm.
    del wait
Initiate the series of windows to add metadata to the contribution.
def _execute_select_commands(self, source, commands):
    """Run select queries for all tables of a source DB, grouping rows by table."""
    rows = {}
    progress = tqdm(commands, total=len(commands),
                    desc='Executing {0} select queries'.format(source))
    for tbl, command in progress:
        rows.setdefault(tbl, []).extend(self.fetch(command, commit=True))
    self._commit()
    return rows
Execute select queries for all of the tables from a source database.
def swear_word(self) -> str:
    """Return a random swear word from the loaded word data."""
    words = self._data['words'].get('bad')
    return self.random.choice(words)
Get a random swear word. :return: Swear word. :Example: Damn.
def surface_projection_from_fault_data(cls, edges):
    """Return the convex-hull surface projection polygon of the fault edges.

    :param edges: list of horizontal edge lines (iterables of points with
        ``longitude``/``latitude`` attributes).
    """
    points = [point for edge in edges for point in edge]
    lons = numpy.array([p.longitude for p in points], dtype=float)
    lats = numpy.array([p.latitude for p in points], dtype=float)
    return Mesh(lons, lats, depths=None).get_convex_hull()
Get a surface projection of the complex fault surface. :param edges: A list of horizontal edges of the surface as instances of :class:`openquake.hazardlib.geo.line.Line`. :returns: Instance of :class:`~openquake.hazardlib.geo.polygon.Polygon` describing the surface projection of the complex fault.
def retrieve_content(self):
    """Fetch, cache, and return the content of this resource."""
    path = self._construct_path_to_source_content()
    content = self._http.get(path)['content']
    self._populated_fields['content'] = content
    return content
Retrieve the content of a resource.
def get_report_details(self, report_id, id_type=None):
    """Retrieve a report by internal or external ID.

    :param str report_id: the report's ID.
    :param str id_type: whether the ID is internal or external.
    :return: the retrieved Report object.
    """
    response = self._client.get("reports/%s" % report_id,
                                params={'idType': id_type})
    return Report.from_dict(response.json())
Retrieves a report by its ID. Internal and external IDs are both allowed. :param str report_id: The ID of the incident report. :param str id_type: Indicates whether ID is internal or external. :return: The retrieved |Report| object. Example: >>> report = ts.get_report_details("1a09f14b-ef8c-443f-b082-9643071c522a") >>> print(report) { "id": "1a09f14b-ef8c-443f-b082-9643071c522a", "created": 1515571633505, "updated": 1515620420062, "reportBody": "Employee reported suspect email. We had multiple reports of suspicious email overnight ...", "title": "Phishing Incident", "enclaveIds": [ "ac6a0d17-7350-4410-bc57-9699521db992" ], "distributionType": "ENCLAVE", "timeBegan": 1479941278000 }
def secure_boot(self):
    """Reference to the `SecureBoot` instance for this resource."""
    path = utils.get_subresource_path_by(self, 'SecureBoot')
    return secure_boot.SecureBoot(self._conn, path,
                                  redfish_version=self.redfish_version)
Property to provide reference to `SecureBoot` instance It is calculated once when the first time it is queried. On refresh, this property gets reset.
def _get_ilo_details(self):
    """Return the iLO manager resource and its URI.

    :raises: IloError on a non-200 response or an unexpected manager type.
    """
    manager_uri = '/rest/v1/Managers/1'
    status, headers, manager = self._rest_get(manager_uri)
    if status != 200:
        raise exception.IloError(self._get_extended_error(manager))
    mtype = self._get_type(manager)
    if mtype not in ('Manager.0', 'Manager.1'):
        raise exception.IloError("%s is not a valid Manager type " % mtype)
    return manager, manager_uri
Gets iLO details :raises: IloError, on an error from iLO. :raises: IloConnectionError, if iLO is not up after reset. :raises: IloCommandNotSupportedError, if the command is not supported on the server.
def _get_block_transaction_data(db: BaseDB, transaction_root: Hash32) -> Iterable[Hash32]:
    """Yield the encoded transactions stored under the given root, in index order."""
    trie = HexaryTrie(db, root_hash=transaction_root)
    index = 0
    while True:
        key = rlp.encode(index)
        if key not in trie:
            break
        yield trie[key]
        index += 1
Returns iterable of the encoded transactions for the given block header
def find_group_differences(groups1, groups2):
    r"""Return a measure of how dissimilar two groupings are.

    For every item, the error is the size of the symmetric difference
    between its group-mates in ``groups1`` and in ``groups2``; the total
    error is the sum of those sizes over all items.

    Args:
        groups1 (list): true grouping of items
        groups2 (list): predicted grouping of items

    Example:
        >>> find_group_differences([[1, 2, 3], [4], [5, 6]],
        ...                        [[1, 2, 3], [4], [5, 6]])
        0
        >>> find_group_differences([[1, 2, 3], [4], [5, 6]],
        ...                        [[1, 2], [4], [5, 6]])
        4
    """
    import itertools
    # Map each item to the set of other items sharing its group.
    others1 = {item: set(group) - {item}
               for group in groups1 for item in group}
    others2 = {item: set(group) - {item}
               for group in groups2 for item in group}
    # Every item appearing in either grouping (stdlib itertools replaces
    # the original third-party utool.flatten dependency).
    all_items = set(itertools.chain.from_iterable(groups1))
    all_items.update(itertools.chain.from_iterable(groups2))
    total_error = 0
    for item in all_items:
        mates1 = others1.get(item, set())
        mates2 = others2.get(item, set())
        # Count mates missing from either side (symmetric difference).
        total_error += len(mates1 - mates2) + len(mates2 - mates1)
    return total_error
r""" Returns a measure of how dissimilar two groupings are Args: groups1 (list): true grouping of items groups2 (list): predicted grouping of items CommandLine: python -m utool.util_alg find_group_differences SeeAlso: vtool.group_indicies vtool.apply_grouping Example0: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]] >>> groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]] >>> total_error = find_group_differences(groups1, groups2) >>> result = ('total_error = %r' % (total_error,)) >>> print(result) total_error = 20 Example1: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> groups1 = [[1, 2, 3], [4], [5, 6]] >>> groups2 = [[1, 2, 3], [4], [5, 6]] >>> total_error = find_group_differences(groups1, groups2) >>> result = ('total_error = %r' % (total_error,)) >>> print(result) total_error = 0 Example2: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> groups1 = [[1, 2, 3], [4], [5, 6]] >>> groups2 = [[1, 2], [4], [5, 6]] >>> total_error = find_group_differences(groups1, groups2) >>> result = ('total_error = %r' % (total_error,)) >>> print(result) total_error = 4 Ignore: # Can this be done via sklearn label analysis? # maybe no... the labels assigned to each component are arbitrary # maybe if we label edges? likely too many labels. groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]] groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
def fetch_bug_details(self, bug_ids):
    """Fetch bug metadata from Bugzilla.

    Returns the list of bug dicts on success, otherwise None.
    """
    params = {
        'include_fields': 'product, component, priority, whiteboard, id',
        'id': bug_ids,
    }
    try:
        response = self.session.get(settings.BZ_API_URL + '/rest/bug',
                                    headers=self.session.headers,
                                    params=params, timeout=30)
        response.raise_for_status()
    except RequestException as e:
        logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
        return None
    # An HTML response means Bugzilla served an error page, not JSON.
    if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
        return None
    data = response.json()
    if 'bugs' not in data:
        return None
    return data['bugs']
Fetches bug metadata from bugzilla and returns an encoded dict if successful, otherwise returns None.
def _highlight(string, color):
    """Wrap *string* in ANSI escape codes when terminal color is enabled."""
    if not CONFIG['color']:
        return string
    # Colors 0-7 map to standard codes (offset 30); 8+ map to bright
    # codes (offset 82, i.e. 90+ for color-8).
    offset = 30 if color < 8 else 82
    return '\033[{color}m{string}\033[0m'.format(string=string,
                                                 color=color + offset)
Return a string highlighted for a terminal.
def set_device_scale(self, x_scale, y_scale):
    """Set the scale multiplied into device coordinates when drawing.

    Affects both drawing to the surface and using the surface as a
    source pattern.

    :param x_scale: the scale in the X direction, in device units.
    :param y_scale: the scale in the Y direction, in device units.

    *New in cairo 1.14.*
    """
    cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
    # Raise if the cairo call left the surface in an error state.
    self._check_status()
Sets a scale that is multiplied to the device coordinates determined by the CTM when drawing to surface. One common use for this is to render to very high resolution display devices at a scale factor, so that code that assumes 1 pixel will be a certain size will still work. Setting a transformation via cairo_translate() isn't sufficient to do this, since functions like cairo_device_to_user() will expose the hidden scale. Note that the scale affects drawing to the surface as well as using the surface in a source pattern. :param x_scale: the scale in the X direction, in device units. :param y_scale: the scale in the Y direction, in device units. *New in cairo 1.14.* *New in cairocffi 0.9.*
def get_resources(self, ids, cache=True):
    """Describe DAX clusters by name.

    The *cache* flag is accepted for interface compatibility but not
    used by this implementation.
    """
    session = local_session(self.manager.session_factory)
    client = session.client('dax')
    response = client.describe_clusters(ClusterNames=ids)
    return response.get('Clusters')
Retrieve dax resources for serverless policies or related resources
def verify(password_hash, password):
    """Compare a scryptsalsa208sha256 modular hash against *password*.

    :param password_hash: bytes
    :param password: bytes
    :rtype: boolean
    """
    # NOTE(review): the error message quotes ..._STRBYTES while the check
    # uses PWHASH_SIZE — assumed to be the same value; confirm.
    ensure(len(password_hash) == PWHASH_SIZE,
           "The password hash must be exactly %s bytes long" %
           nacl.bindings.crypto_pwhash_scryptsalsa208sha256_STRBYTES,
           raising=exc.ValueError)
    return nacl.bindings.crypto_pwhash_scryptsalsa208sha256_str_verify(
        password_hash, password
    )
Takes the output of scryptsalsa208sha256 and compares it against a user provided password to see if they are the same :param password_hash: bytes :param password: bytes :rtype: boolean .. versionadded:: 1.2
def OnCopy(self, event):
    """Clipboard copy event handler."""
    focus = self.main_window.FindFocus()
    if isinstance(focus, wx.TextCtrl):
        # A focused text control handles its own copy.
        focus.Copy()
    else:
        # Otherwise copy the current grid selection to the clipboard.
        selection = self.main_window.grid.selection
        copied = self.main_window.actions.copy(selection)
        self.main_window.clipboard.set_clipboard(copied)
    event.Skip()
Clipboard copy event handler
def parentItem(self, value):
    """Setter: assign the parent item and rebuild this node's path recursively."""
    self._parentItem = value
    # Recompute this node's path and propagate it to all descendants.
    self._recursiveSetNodePath(self._constructNodePath())
The parent item
def __do_parse(self, pattern_str):
    """Parse the given STIX pattern and return the antlr parse tree.

    :param pattern_str: The STIX pattern
    :return: The parse tree
    :raises ParseException: If there is a parse error
    """
    in_ = antlr4.InputStream(pattern_str)
    lexer = STIXPatternLexer(in_)
    # Swap the default console listeners for our own error listener below.
    lexer.removeErrorListeners()
    token_stream = antlr4.CommonTokenStream(lexer)
    parser = STIXPatternParser(token_stream)
    parser.removeErrorListeners()
    error_listener = ParserErrorListener()
    parser.addErrorListener(error_listener)
    # Bail on the first syntax error instead of attempting recovery.
    parser._errHandler = antlr4.BailErrorStrategy()
    # Substitute symbolic names for "<INVALID>" literals so error
    # messages are readable.
    for i, lit_name in enumerate(parser.literalNames):
        if lit_name == u"<INVALID>":
            parser.literalNames[i] = parser.symbolicNames[i]
    try:
        tree = parser.pattern()
        return tree
    except antlr4.error.Errors.ParseCancellationException as e:
        # BailErrorStrategy wraps the real error; unwrap, report, and
        # re-raise as ParseException with the collected message.
        real_exc = e.args[0]
        parser._errHandler.reportError(parser, real_exc)
        six.raise_from(ParseException(error_listener.error_message), real_exc)
Parses the given pattern and returns the antlr parse tree. :param pattern_str: The STIX pattern :return: The parse tree :raises ParseException: If there is a parse error
async def create(cls, destination: Union[int, Subnet],
                 source: Union[int, Subnet], gateway_ip: str, metric: int):
    """Create a `StaticRoute` in MAAS.

    *destination* and *source* may be `Subnet` objects or raw subnet ids.

    :returns: The created StaticRoute.
    """
    params = {"gateway_ip": gateway_ip, "metric": metric}
    # Accept either a Subnet object or a raw subnet id for both endpoints.
    for name, subnet in (("source", source), ("destination", destination)):
        if isinstance(subnet, Subnet):
            params[name] = subnet.id
        elif isinstance(subnet, int):
            params[name] = subnet
    return cls._object(await cls._handler.create(**params))
Create a `StaticRoute` in MAAS. :param destination: The destination `Subnet` (or its id) of the route. :type destination: `Subnet` or `int` :param source: The source `Subnet` (or its id) of the route. :type source: `Subnet` or `int` :param gateway_ip: The gateway IP address for the route. :type gateway_ip: `str` :param metric: The metric (weight) of the route. :type metric: `int` :returns: The created StaticRoute :rtype: `StaticRoute`
def form_valid(self, form):
    """Store the submitted metric slugs, then defer to the parent handler."""
    metrics = form.cleaned_data['metrics']
    self.metric_slugs = [slug.strip() for slug in metrics]
    return super(AggregateFormView, self).form_valid(form)
Pull the metrics from the submitted form, and store them as a list of strings in ``self.metric_slugs``.
def uniqify(list_):
    """Return a new list with duplicates removed, preserving order.

    Quadratic membership scan; intended for short lists only.
    """
    unique = []
    for item in list_:
        if item not in unique:
            unique.append(item)
    return unique
inefficient on long lists; short lists only. preserves order.
def guarded(meth):
    """Decorator adding a connection-validity sanity check to resource methods."""
    @functools.wraps(meth)
    def wrapper(self, *args, **kwargs):
        # Verify the connection is usable before delegating to the method.
        self._check_conn_validity(meth.__name__)
        return meth(self, *args, **kwargs)
    return wrapper
A decorator to add a sanity check to ConnectionResource methods.
def __sub(self, string: str = '') -> str:
    """Replace whitespace runs in *string* with a randomly chosen '_' or '-'."""
    joiner = self.random.choice(['_', '-'])
    return re.sub(r'\s+', joiner, string.strip())
Replace spaces in string. :param string: String. :return: String without spaces.
def validate_serializer(serializer, _type):
    """Raise ValueError unless *serializer* is a subclass of *_type*."""
    if issubclass(serializer, _type):
        return
    raise ValueError("Serializer should be an instance of {}".format(_type.__name__))
Validates the serializer for given type. :param serializer: (Serializer), the serializer to be validated. :param _type: (Type), type to be used for serializer validation.