code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _generate_report_all(self): assert self.workbook is not None count = 0 for sid in self.folders.subfolders(self.folder_id, self.user): count += 1 self._generate_for_subfolder(sid) if count == 0: print("I: empty workbook created: no subfolders found")
Generate report for all subfolders contained by self.folder_id.
def prox_zero(X, step):
    """Proximal operator of the zero function: project everything onto 0.

    Returns an all-zero array with the same shape and dtype as ``X``;
    ``step`` is accepted for interface compatibility and ignored.
    """
    return np.zeros_like(X)
Proximal operator to project onto zero
def _make_item(model):
    """Build an ``.epub.Item`` from a ``.models.Document`` or ``.models.Resource``."""
    return Item(model.id, model.content, model.media_type)
Makes an ``.epub.Item`` from a ``.models.Document`` or ``.models.Resource``
def add_constraint(self, func, variables, default_values=None):
    """Register *func* as a constraint over one or more *variables*.

    The function must return True/False for a combination of variable
    values; falsy ``default_values`` are normalised to an empty tuple.
    """
    defaults = default_values if default_values else ()
    self._constraints.append((func, variables, defaults))
Adds a constraint that applies to one or more variables. The function must return true or false to indicate which combinations of variable values are valid.
def set_value(self, value):
    """Parse *value* with the field's parser and store the result on ``self.value``."""
    self.value = self._parse_value(value)
Parses, and sets the value attribute for the field. :param value: The value to be parsed and set, the allowed input types vary depending on the Field used
def limitsChanged(self, param, limits):
    """Propagate a limits change to the spin-box widget (numeric types only)."""
    ParameterItem.limitsChanged(self, param, limits)
    if self.param.opts['type'] in ('int', 'float'):
        self.widget.setOpts(bounds=limits)
Called when the parameter's limits have changed
def export_public_keys(env=None, sp=subprocess):
    """Export all GPG public keys; raise KeyError when none are found."""
    output = check_output(args=gpg_command(['--export']), env=env, sp=sp)
    if not output:
        raise KeyError('No GPG public keys found at env: {!r}'.format(env))
    return output
Export all GPG public keys.
def flush(self):
    """Remove the whole attachments storage from the annotation."""
    annotation = self.get_annotation()
    storage = annotation.get(ATTACHMENTS_STORAGE)
    if storage is not None:
        del annotation[ATTACHMENTS_STORAGE]
Remove the whole storage
def write_warning(self, url_data):
    """Write url_data.warnings to the output, wrapped and colourised."""
    self.write(self.part("warning") + self.spaces("warning"))
    messages = [u"[%s] %s" % entry for entry in url_data.warnings]
    self.writeln(self.wrap(messages, 65), color=self.colorwarning)
Write url_data.warnings.
def _get_local_folder(self, root=None): if root is None: root = Path() for folders in ['.'], [self.user, self.napp]: kytos_json = root / Path(*folders) / 'kytos.json' if kytos_json.exists(): with kytos_json.open() as file_descriptor: meta = json.load(file_descriptor) username = meta.get('username', meta.get('author')) if username == self.user and meta.get('name') == self.napp: return kytos_json.parent raise FileNotFoundError('kytos.json not found.')
Return local NApp root folder. Search for kytos.json in _./_ folder and _./user/napp_. Args: root (pathlib.Path): Where to begin searching. Return: pathlib.Path: NApp root folder. Raises: FileNotFoundError: If there is no such local NApp.
def read_line(self, fid):
    """Read the next non-comment line from *fid*.

    Skips lines starting with '#'. Returns the stripped line, or '' when
    a blank line / end of file is reached.

    NOTE(review): the stored source for this function is textually
    corrupted; this is a reconstruction of the obvious comment-skipping
    loop implied by the docstring -- confirm against upstream.
    """
    lin = '#'
    while lin[0] == '#':
        lin = fid.readline().strip()
        if lin == '':
            return lin
    return lin
Read a line from a file string and check it isn't either empty or commented before returning.
def _deflate(cls):
    """Prepare for serialisation: public class attributes under the config key."""
    public = {name: value for name, value in vars(cls).items()
              if not name.startswith("_")}
    return {Constants.CONFIG_KEY: public}
Prepare for serialisation - returns a dictionary
def _configure_interrupt(self, function_name, timeout, container, is_debugging):
    """Install interrupt handling for a running Lambda container.

    When debugging, a SIGTERM handler stops the container; otherwise a
    timer enforces *timeout* seconds.

    :param string function_name: Name of the function being run
    :param integer timeout: Timeout in seconds
    :param container: Container instance to terminate
    :param bool is_debugging: Whether a debugger is attached
    :return threading.Timer: The started timer, or None when debugging
    """
    def on_timeout():
        LOG.info("Function '%s' timed out after %d seconds", function_name, timeout)
        self._container_manager.stop(container)

    def on_signal(sig, frame):
        LOG.info("Execution of function %s was interrupted", function_name)
        self._container_manager.stop(container)

    if is_debugging:
        LOG.debug("Setting up SIGTERM interrupt handler")
        signal.signal(signal.SIGTERM, on_signal)
        return None

    LOG.debug("Starting a timer for %s seconds for function '%s'", timeout, function_name)
    timer = threading.Timer(timeout, on_timeout, ())
    timer.start()
    return timer
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution. Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though, we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container. :param string function_name: Name of the function we are running :param integer timeout: Timeout in seconds :param samcli.local.docker.container.Container container: Instance of a container to terminate :param bool is_debugging: Are we debugging? :return threading.Timer: Timer object, if we setup a timer. None otherwise
def get_api_url(self, api_resource, auth_token_ticket, authenticator,
                private_key, service_url=None, **kwargs):
    """Build an auth-token-protected CAS API URL for *api_resource*."""
    token, signature = self._build_auth_token_data(
        auth_token_ticket, authenticator, private_key, **kwargs)
    query = {'at': token, 'ats': signature}
    if service_url is not None:
        query['service'] = service_url
    return '{}?{}'.format(self._get_api_url(api_resource), urlencode(query))
Build an auth-token-protected CAS API url.
def compute(self, activeColumns, apicalInput=(), apicalGrowthCandidates=None,
            learn=True):
    """Perform one timestep.

    Activates the given columns using the previous timestep's predictions,
    then forms a new set of predictions from the new active cells and
    ``apicalInput``.

    @param activeColumns (numpy array) Active columns
    @param apicalInput (numpy array) Active apical input bits
    @param apicalGrowthCandidates (numpy array or None)
           Bits the active cells may grow apical synapses to; defaults to
           ``apicalInput`` when None.
    @param learn (bool) Whether to grow / reinforce / punish synapses
    """
    activeColumns = np.asarray(activeColumns)
    apicalInput = np.asarray(apicalInput)
    if apicalGrowthCandidates is None:
        apicalGrowthCandidates = apicalInput
    apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)

    self.prevPredictedCells = self.predictedCells
    self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
                       self.winnerCells, self.prevApicalGrowthCandidates,
                       learn)
    self.depolarizeCells(self.activeCells, apicalInput, learn)

    # Remember this timestep's apical input for the next activation.
    self.prevApicalInput = apicalInput.copy()
    self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy()
Perform one timestep. Activate the specified columns, using the predictions from the previous timestep, then learn. Then form a new set of predictions using the new active cells and the apicalInput. @param activeColumns (numpy array) List of active columns @param apicalInput (numpy array) List of active input bits for the apical dendrite segments @param apicalGrowthCandidates (numpy array or None) List of bits that the active cells may grow new apical synapses to If None, the apicalInput is assumed to be growth candidates. @param learn (bool) Whether to grow / reinforce / punish synapses
def iter_insert_items(tree):
    """Yield one dict per row to insert from an INSERT parse tree."""
    if tree.list_values:
        keys = tree.attrs
        for values in tree.list_values:
            if len(keys) != len(values):
                raise SyntaxError(
                    "Values '%s' do not match attributes "
                    "'%s'" % (values, keys)
                )
            yield dict(zip(keys, map(resolve, values)))
    elif tree.map_values:
        for item in tree.map_values:
            yield {key: resolve(val) for (key, val) in item}
    else:
        raise SyntaxError("No insert data found")
Iterate over the items to insert from an INSERT statement
def uninstall(self):
    """Uninstall Mackup: restore each file to where it was before backup.

    For each configured file, if a Mackup copy exists and a file/dir is
    present at the home path, the home copy is deleted and replaced with
    the Mackup copy (honouring the dry-run and verbose settings).
    """
    for filename in self.files:
        (home_filepath, mackup_filepath) = self.getFilepaths(filename)
        if (os.path.isfile(mackup_filepath)
                or os.path.isdir(mackup_filepath)):
            if os.path.exists(home_filepath):
                if self.verbose:
                    print("Reverting {}\n at {} ..."
                          .format(mackup_filepath, home_filepath))
                else:
                    print("Reverting {} ...".format(filename))
                # NOTE(review): nesting reconstructed from a flattened
                # source -- restore appears to run only when a home copy
                # exists; confirm against upstream history.
                if self.dry_run:
                    continue
                utils.delete(home_filepath)
                utils.copy(mackup_filepath, home_filepath)
        elif self.verbose:
            print("Doing nothing, {} does not exist"
                  .format(mackup_filepath))
Uninstall Mackup. Restore any file where it was before the 1st Mackup backup. Algorithm: for each file in config if mackup/file exists if home/file exists delete home/file copy mackup/file home/file delete the mackup folder print how to delete mackup
def iter_filtered_dir_entry(dir_entries, match_patterns, on_skip):
    """Yield DirEntryPath instances, or None for entries hitting a skip pattern.

    :param dir_entries: iterable of os.DirEntry-like objects
    :param match_patterns: patterns used with Path.match()
    :param on_skip: callback invoked as on_skip(entry, pattern) for hits
    """
    def is_skipped(entry_path):
        # Report the first matching pattern via the callback.
        for pattern in match_patterns:
            if entry_path.path_instance.match(pattern):
                on_skip(entry_path, pattern)
                return True
        return False

    for entry in dir_entries:
        try:
            dir_entry_path = DirEntryPath(entry)
        except FileNotFoundError as err:
            log.error("Can't make DirEntryPath() instance: %s" % err)
            continue
        yield None if is_skipped(dir_entry_path) else dir_entry_path
Filter a list of DirEntryPath instances with the given pattern :param dir_entries: list of DirEntryPath instances :param match_patterns: used with Path.match() e.g.: "__pycache__/*", "*.tmp", "*.cache" :param on_skip: function that will be called if 'match_patterns' hits. e.g.: def on_skip(entry, pattern): log.error("Skip pattern %r hit: %s" % (pattern, entry.path)) :return: yields None or DirEntryPath instances
def find_channels(channels, connection=None, host=None, port=None,
                  sample_rate=None, type=Nds2ChannelType.any(),
                  dtype=Nds2DataType.any(), unique=False, epoch='ALL'):
    """Query an NDS2 server for channel information.

    ``sample_rate`` may be a single number (matched exactly) or a
    ``(low, high)`` tuple; ``epoch`` is a named epoch or GPS interval.
    Returns a list of nds2.channel objects.
    """
    if not isinstance(epoch, tuple):
        epoch = (epoch or 'All',)
    connection.set_epoch(*epoch)

    # Normalise a scalar rate to a degenerate (rate, rate) interval.
    if isinstance(sample_rate, (int, float)):
        sample_rate = (sample_rate, sample_rate)
    elif sample_rate is None:
        sample_rate = ()

    matches = []
    for name in _get_nds2_names(channels):
        matches.extend(_find_channel(connection, name, type, dtype,
                                     sample_rate, unique=unique))
    return matches
Query an NDS2 server for channel information Parameters ---------- channels : `list` of `str` list of channel names to query, each can include bash-style globs connection : `nds2.connection`, optional open NDS2 connection to use for query host : `str`, optional name of NDS2 server to query, required if ``connection`` is not given port : `int`, optional port number on host to use for NDS2 connection sample_rate : `int`, `float`, `tuple`, optional a single number, representing a specific sample rate to match, or a tuple representing a ``(low, high)` interval to match type : `int`, optional the NDS2 channel type to match dtype : `int`, optional the NDS2 data type to match unique : `bool`, optional, default: `False` require one (and only one) match per channel epoch : `str`, `tuple` of `int`, optional the NDS epoch to restrict to, either the name of a known epoch, or a 2-tuple of GPS ``[start, stop)`` times Returns ------- channels : `list` of `nds2.channel` list of NDS2 channel objects See also -------- nds2.connection.find_channels for documentation on the underlying query method Examples -------- >>> from gwpy.io.nds2 import find_channels >>> find_channels(['G1:DER_DATA_H'], host='nds.ligo.caltech.edu') [<G1:DER_DATA_H (16384Hz, RDS, FLOAT64)>]
def volume_delete(self, name):
    """Delete the named block device via the cinder endpoint.

    Returns the volume record; already-deleted volumes are returned as-is.
    """
    if self.volume_conn is None:
        raise SaltCloudSystemExit('No cinder endpoint available')
    try:
        volume = self.volume_show(name)
    except KeyError as exc:
        raise SaltCloudSystemExit(
            'Unable to find {0} volume: {1}'.format(name, exc))
    if volume['status'] == 'deleted':
        return volume
    self.volume_conn.volumes.delete(volume['id'])
    return volume
Delete a block device
def OverwriteAndClose(self, compressed_data, size):
    """Directly overwrite the stream contents and close the object.

    Avoids recompressing: the payload is stored as given.

    Args:
      compressed_data: The data to write, must be zlib compressed.
      size: The uncompressed size of the data.
    """
    self.Set(self.Schema.CONTENT(compressed_data))
    self.Set(self.Schema.SIZE(size))
    super(AFF4MemoryStreamBase, self).Close()
Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data.
def sum_biomass_weight(reaction):
    """Compute the molecular weight of the biomass reaction in g/mmol.

    All metabolites of the reaction must have formula information.
    """
    total = sum(-coef * met.formula_weight
                for (met, coef) in iteritems(reaction.metabolites))
    return total / 1000.0
Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol.
def djd_to_datetime(djd, tz='UTC'):
    """Convert a Dublin Julian Day float to a timezone-aware datetime.

    :param djd: fractional days since 1899-12-31 12:00 UTC
    :param tz: timezone name to localise the result to
    """
    epoch = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
    utc_time = epoch + dt.timedelta(days=djd)
    return utc_time.astimezone(pytz.timezone(tz))
Converts a Dublin Julian Day float to a datetime.datetime object Parameters ---------- djd : float fractional days since 12/31/1899+0000 tz : str, default 'UTC' timezone to localize the result to Returns ------- datetime.datetime The resultant datetime localized to tz
def _delete_collection(self, ctx):
    """Delete a collection from the DB, optionally filtered by ``filter_by``.

    Returns the number of deleted rows.
    """
    assert isinstance(ctx, ResourceQueryContext)
    filter_by = ctx.get_filter_by()
    query = self._orm.query(self.model_cls)
    if filter_by is not None:
        query = self.to_filter(query, filter_by)
    return query.delete()
Delete a collection from DB, optionally filtered by ``filter_by``
def _to_tuple(self, _list): result = list() for l in _list: if isinstance(l, list): result.append(tuple(self._to_tuple(l))) else: result.append(l) return tuple(result)
Recursively converts lists to tuples
def add_default_fields(self, names, **kwargs):
    """Add one or more empty default fields to self.

    Parameters
    ----------
    names : (list of) string(s)
        The names of the fields to add. Must be fields in self's
        default fields.
    kwargs : dict
        Passed through to ``self.default_fields``.

    Returns
    -------
    instance of this array
        A copy of this array with the fields added.
    """
    if isinstance(names, string_types):
        names = [names]
    default_fields = self.default_fields(include_virtual=False, **kwargs)
    # dummy 1-element instance used only to resolve the needed field names
    arr = self.__class__(1, field_kwargs=kwargs)
    # remember the caller's requested ordering so it can be restored below
    sortdict = dict([[nm, ii] for ii,nm in enumerate(names)])
    names = list(get_needed_fieldnames(arr, names))
    # requested names keep their order; any extra needed names sort last
    names.sort(key=lambda x: sortdict[x] if x in sortdict else len(names))
    fields = [(name, default_fields[name]) for name in names]
    arrays = []
    names = []
    for name,dt in fields:
        arrays.append(default_empty(self.size, dtype=[(name, dt)]))
        names.append(name)
    return self.add_fields(arrays, names)
Adds one or more empty default fields to self. Parameters ---------- names : (list of) string(s) The names of the fields to add. Must be a field in self's default fields. Other keyword args are any arguments passed to self's default fields. Returns ------- new array : instance of this array A copy of this array with the field added.
def decode(cls, s):
    """Decode a base64 string to plain text.

    :param s: unicode str|bytes, the base64 encoded string
    :returns: unicode str
    """
    raw = base64.b64decode(ByteString(s))
    return String(raw)
decodes a base64 string to plain text :param s: unicode str|bytes, the base64 encoded string :returns: unicode str
def s2b(s):
    """Convert string *s* to its binary representation, 8 bits per character."""
    return "".join(format(ord(ch), "08b") for ch in s)
String to binary.
def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt="png", **kwargs):
    """Generate the flow graph in the DOT language and plot it with matplotlib.

    Args:
        ax: matplotlib Axes or None if a new figure should be created.
        figsize: matplotlib figure size (None to use default).
        dpi: DPI value.
        fmt: output image format.

    Return: matplotlib Figure
    """
    graph = self.get_graphviz(**kwargs)
    graph.format = fmt
    graph.attr(dpi=str(dpi))
    _, tmpname = tempfile.mkstemp()
    path = graph.render(tmpname, view=False, cleanup=True)
    ax, fig, _ = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)
    import matplotlib.image as mpimg
    # Bug fix: honour the requested format instead of hard-coding "png".
    ax.imshow(mpimg.imread(path, format=fmt))
    ax.axis("off")
    return fig
Generate flow graph in the DOT language and plot it with matplotlib. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. figsize: matplotlib figure size (None to use default) dpi: DPI value. fmt: Select format for output image Return: matplotlib Figure
def DEFINE_multi_enum(name, default, enum_values, help,
                      flag_values=_flagvalues.FLAGS, case_sensitive=True,
                      **args):
    """Register a flag whose value is a list of strings drawn from enum_values.

    Repeating the flag on the command line appends to the list; 'default'
    may be a single string or a list of strings.
    """
    enum_parser = _argument_parser.EnumParser(enum_values, case_sensitive)
    arg_serializer = _argument_parser.ArgumentSerializer()
    DEFINE_multi(enum_parser, arg_serializer, name, default, help,
                 flag_values, **args)
Registers a flag whose value can be a list strings from enum_values. Use the flag on the command line multiple times to place multiple enum values into the list. The 'default' may be a single string (which will be converted into a single-element list) or a list of strings. Args: name: str, the flag name. default: Union[Iterable[Text], Text, None], the default value of the flag; see `DEFINE_multi`. enum_values: [str], a non-empty list of strings with the possible values for the flag. help: str, the help message. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. case_sensitive: Whether or not the enum is to be case-sensitive. **args: Dictionary with extra keyword args that are passed to the Flag __init__.
def split(self, frac):
    """Split the DataFrame into two DataFrames with the given ratio.

    :param frac: split ratio
    :return: two split DataFrame objects
    """
    from .. import preprocess
    splitter = getattr(preprocess, '_Split')(fraction=frac)
    return splitter.transform(self)
Split the DataFrame into two DataFrames with certain ratio. :param frac: Split ratio :type frac: float :return: two split DataFrame objects :rtype: list[DataFrame]
def parallel_map(func, iterable, args=None, kwargs=None, workers=None):
    """Map *func* over *iterable* using gevent greenlets.

    :param func: function applied to each element
    :param iterable: elements to map over
    :param args: extra positional arguments for func
    :param kwargs: extra keyword arguments for func
    :param workers: bound on concurrently running greenlets (None = unbounded)
    """
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    pool = Pool(workers) if workers is not None else Group()
    greenlets = [pool.spawn(func, item, *args, **kwargs) for item in iterable]
    pool.join(raise_error=True)
    results = []
    for greenlet in greenlets:
        value = greenlet.get()
        if isinstance(value, BaseException):
            raise value
        results.append(value)
    return results
Map func on a list using gevent greenlets. :param func: function applied on iterable elements :type func: function :param iterable: elements to map the function over :type iterable: iterable :param args: arguments of func :type args: tuple :param kwargs: keyword arguments of func :type kwargs: dict :param workers: limit the number of greenlets running in parallel :type workers: int
def getsourcelines(object):
    """Return (source lines, starting line number) for *object*.

    Modules start at line 0; other objects report their 1-based position.
    An IOError is raised if the source cannot be retrieved.
    """
    lines, lnum = findsource(object)
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An IOError is raised if the source code cannot be retrieved.
def create_from_previous_version(cls, previous_shadow, payload):
    """Build the next FakeShadow version; a None payload marks deletion."""
    if previous_shadow:
        version = previous_shadow.version + 1
        previous_payload = previous_shadow.to_dict(include_delta=False)
    else:
        version, previous_payload = 1, {}

    if payload is None:
        return FakeShadow(None, None, None, version, deleted=True)

    # Fall back to the previous shadow's state when a section is absent.
    prev_state = previous_payload.get('state', {})
    desired = payload['state'].get('desired', prev_state.get('desired', None))
    reported = payload['state'].get('reported', prev_state.get('reported', None))
    return FakeShadow(desired, reported, payload, version)
Set payload to None when you want to delete the shadow.
def degree_histogram(G, t=None):
    """Return a list of the frequency of each degree value at snapshot *t*.

    Parameters
    ----------
    G : DyNetx graph object
    t : snapshot id (default=None)

    Returns
    -------
    list
        Frequencies indexed by degree. Empty for a graph with no nodes
        (the original raised ValueError from ``max()`` in that case).
    """
    counts = Counter(d for n, d in G.degree(t=t).items())
    if not counts:
        return []
    return [counts.get(i, 0) for i in range(max(counts) + 1)]
Return a list of the frequency of each degree value. Parameters ---------- G : Graph opject DyNetx graph object t : snapshot id (default=None) snapshot id Returns ------- hist : list A list of frequencies of degrees. The degree values are the index in the list. Notes ----- Note: the bins are width one, hence len(list) can be large (Order(number_of_edges))
def set_current_context(self, name):
    """Set the kubeconfig current-context to *name*; reject unknown contexts."""
    if not self.context_exists(name):
        raise KubeConfError("Context does not exist.")
    self.data['current-context'] = name
Set the current context in kubeconfig.
def url_converter(self, *args, **kwargs):
    """Return the custom URL converter for the given file name.

    Wraps the upstream converter so that a ValueError (e.g. an unresolvable
    reference) falls back to the raw matched text instead of failing.
    """
    upstream_converter = super(PatchedManifestStaticFilesStorage,
                               self).url_converter(*args, **kwargs)

    def converter(matchobj):
        try:
            # Bug fix: the original dropped the upstream result, making
            # every successful conversion return None.
            return upstream_converter(matchobj)
        except ValueError:
            matched, url = matchobj.groups()
            return matched

    return converter
Return the custom URL converter for the given file name.
def _get_core_transform(self, resolution): return self._base_transform(self._center_longitude, self._center_latitude, resolution)
The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.
def tail(filename, number_of_bytes):
    """Return the last *number_of_bytes* bytes of *filename*.

    Files smaller than the requested size are returned whole.
    """
    with open(filename, "rb") as handle:
        if os.stat(filename).st_size > number_of_bytes:
            handle.seek(-number_of_bytes, os.SEEK_END)
        return handle.read()
Returns the last number_of_bytes of filename
def parse(self, val):
    """Parse a bool from *val*, matching 'true' or 'false' ignoring case."""
    lowered = str(val).lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise ValueError("cannot interpret '{}' as boolean".format(val))
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
def _apply_commit_rules(rules, commit): all_violations = [] for rule in rules: violations = rule.validate(commit) if violations: all_violations.extend(violations) return all_violations
Applies a set of rules against a given commit and gitcontext
def remove(self, names):
    """Delete the given files in the current directory level.

    in names of type str: the names to remove.
    return progress of type IProgress: tracks operation completion.
    """
    if not isinstance(names, list):
        raise TypeError("names can only be an instance of type list")
    # Bug fix: validate every entry -- the original only checked the
    # first ten (``names[:10]``), letting bad values through.
    for name in names:
        if not isinstance(name, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    progress = self._call("remove", in_p=[names])
    progress = IProgress(progress)
    return progress
Deletes the given files in the current directory level. in names of type str The names to remove. return progress of type :class:`IProgress` Progress object to track the operation completion.
def to_latlon(geojson, origin_espg=None):
    """Convert *geojson* coordinates to WGS84 (EPSG:4326) in place.

    The source EPSG comes from the geojson's crs tag unless *origin_espg*
    is supplied. Non-dict input yields None.
    """
    if not isinstance(geojson, dict):
        return None
    code = origin_espg if origin_espg else epsg_code(geojson)
    if code:
        origin = Proj(init='epsg:%s' % code)
        wgs84 = Proj(init='epsg:4326')
        wrapped = test_wrap_coordinates(geojson['coordinates'], origin, wgs84)
        new_coords = convert_coordinates(geojson['coordinates'], origin,
                                         wgs84, wrapped)
        if new_coords:
            geojson['coordinates'] = new_coords
        # The crs tag no longer applies after reprojection.
        geojson.pop('crs', None)
    return geojson
Convert a given geojson to wgs84. The original epsg must be included insde the crs tag of geojson
def compute(mechanism, subsystem, purviews, cause_purviews, effect_purviews):
    """Compute a |Concept| for *mechanism* within *subsystem* and the purviews."""
    concept = subsystem.concept(mechanism,
                                purviews=purviews,
                                cause_purviews=cause_purviews,
                                effect_purviews=effect_purviews)
    # Detach the subsystem reference before returning.
    concept.subsystem = None
    return concept
Compute a |Concept| for a mechanism, in this |Subsystem| with the provided purviews.
def perturb_vec(q, cone_half_angle=2):
    r"""Randomly perturb vector *q* within a cone of *cone_half_angle* degrees.

    Returns the perturbed numpy array.
    """
    rand_vec = tan_rand(q)
    cross_vector = attitude.unit_vector(np.cross(q, rand_vec))
    s = np.random.uniform(0, 1, 1)
    r = np.random.uniform(0, 1, 1)
    # Sample a direction uniformly on the spherical cap of half-angle h.
    h = np.cos(np.deg2rad(cone_half_angle))
    phi = 2 * np.pi * s
    z = h + (1 - h) * r
    sin_theta = np.sqrt(1 - z**2)
    x = np.cos(phi) * sin_theta
    y = np.sin(phi) * sin_theta
    return rand_vec * x + cross_vector * y + q * z
r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU skulumani@gwu.edu References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle
def compile(self):
    """Recursively compile this widget and its children to HTML.

    :returns: HTML string representation of this widget.
    """
    self.content = "".join(child.compile() for child in self.children)
    return self._generate_html()
Recursively compile this widget as well as all of its children to HTML. :returns: HTML string representation of this widget.
def removeDomain(self, subnetId, domainId):
    """Detach *domainId* from subnet *subnetId*.

    Rebuilds the subnet's 'domain_ids' list without *domainId*.

    @param subnetId: The subnet Id
    @param domainId: The domain id to remove
    @return: boolean -- whether the stored domain list length equals
             the rebuilt id list length
    """
    domain_ids = [domain['id'] for domain in self[subnetId]['domains']]
    domain_ids.remove(domainId)
    self[subnetId]["domain_ids"] = domain_ids
    # Bug fix: '==' instead of 'is' -- identity comparison of ints is
    # only reliable for small cached integers.
    return len(self[subnetId]["domains"]) == len(domain_ids)
Function removeDomain Delete a domain from a subnet @param subnetId: The subnet Id @param domainId: The domain id to be detached from the subnet @return RETURN: boolean
def probe_disable(cls, resource):
    """Disable a probe on a webaccelerator and report progress."""
    oper = cls.call('hosting.rproxy.probe.disable', cls.usable_id(resource))
    # Message typos fixed ("Desactivating" / "have been desactivated").
    cls.echo('Deactivating probe on %s' % resource)
    cls.display_progress(oper)
    cls.echo('The probe has been deactivated')
    return oper
Disable a probe on a webaccelerator
def get_meta(self):
    """Return the ThingMeta metadata object for this Thing (n3 format)."""
    rdf = self.get_meta_rdf(fmt='n3')
    return ThingMeta(self, rdf, self._client.default_lang, fmt='n3')
Get the metadata object for this Thing Returns a [ThingMeta](ThingMeta.m.html#IoticAgent.IOT.ThingMeta.ThingMeta) object
def from_files(cls, ID, datafiles, parser='name', position_mapper=None,
               readdata_kwargs={}, readmeta_kwargs={}, ID_kwargs={},
               **kwargs):
    """Create an OrderedCollection of measurements from a set of data files.

    A custom (non-string) parser requires an explicit position_mapper.
    Extra keyword arguments are passed to the collection constructor.
    """
    if position_mapper is None:
        if isinstance(parser, six.string_types):
            position_mapper = parser
        else:
            msg = "When using a custom parser, you must specify the position_mapper keyword."
            raise ValueError(msg)
    d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class,
                                 **ID_kwargs)
    measurements = []
    for sID, dfile in d.items():
        try:
            measurements.append(cls._measurement_class(
                sID, datafile=dfile,
                readdata_kwargs=readdata_kwargs,
                readmeta_kwargs=readmeta_kwargs))
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            msg = 'Error occured while trying to parse file: %s' % dfile
            raise IOError(msg)
    return cls(ID, measurements, position_mapper, **kwargs)
Create an OrderedCollection of measurements from a set of data files. Parameters ---------- {_bases_ID} {_bases_data_files} {_bases_filename_parser} {_bases_position_mapper} {_bases_ID_kwargs} kwargs : dict Additional key word arguments to be passed to constructor.
def _get_files_modified():
    """Return staged files whose extension is supported (plus .rst)."""
    cmd = "git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD"
    _, files_modified, _ = run(cmd)
    extensions = [re.escape(ext) for ext in list(SUPPORTED_FILES) + [".rst"]]
    pattern = "(?:{0})$".format("|".join(extensions))
    return [f for f in files_modified if re.search(pattern, f)]
Get the list of modified files that are Python or Jinja2.
def unnest(elem):
    """Flatten an arbitrarily nested iterable, yielding its scalar elements.

    Strings are treated as scalars, not as iterables of characters.
    """
    def _is_nested(obj):
        return (isinstance(obj, Iterable)
                and not isinstance(obj, six.string_types))

    if _is_nested(elem):
        # tee so the caller's iterator is not consumed.
        elem, target = tee(elem, 2)
    else:
        target = elem
    for el in target:
        if _is_nested(el):
            el, el_copy = tee(el, 2)
            for sub in unnest(el_copy):
                yield sub
        else:
            yield el
Flatten an arbitrarily nested iterable :param elem: An iterable to flatten :type elem: :class:`~collections.Iterable` >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599))))) >>> list(vistir.misc.unnest(nested_iterable)) [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
def destroy(self):
    """Destroy every vagrant box involved in the deployment."""
    box = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False,
                          quiet_stderr=True)
    box.destroy()
Destroy all vagrant box involved in the deployment.
def time(ctx: Context, command: str):
    """Run *command* in a shell, timing it, then exit with its return code."""
    with timer.Timing(verbose=True):
        proc = run(command, shell=True)
    ctx.exit(proc.returncode)
Time the output of a command.
def is_file_secure(file_name):
    """Return False when group/other have any permission bits on *file_name*.

    A missing file is considered secure.
    """
    if not os.path.isfile(file_name):
        return True
    mode = os.stat(file_name).st_mode
    insecure_bits = (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
                     stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    return not (mode & insecure_bits)
Returns false if file is considered insecure, true if secure. If file doesn't exist, it's considered secure!
def inferTM(self, bottomUp, externalInput):
    """Run inference (no learning) and return the predicted cells."""
    self.reset()
    self.tm.compute(bottomUp, basalInput=externalInput, learn=False)
    return self.tm.getPredictiveCells()
Run inference and return the set of predicted cells
def create(self, provider, names, **kwargs):
    """Create the named VMs on *provider* without using a profile.

    Returns a dict of per-name results, or False for an unknown provider.
    """
    mapper = salt.cloud.Map(self._opts_defaults())
    providers = self.opts['providers']
    if provider not in providers:
        return False
    # Qualify the provider with its first configured driver key.
    provider += ':{0}'.format(next(six.iterkeys(providers[provider])))

    if isinstance(names, six.string_types):
        names = names.split(',')

    ret = {}
    for name in names:
        vm_ = kwargs.copy()
        vm_['name'] = name
        vm_['driver'] = provider
        vm_['profile'] = None
        vm_['provider'] = provider
        ret[name] = salt.utils.data.simple_types_filter(mapper.create(vm_))
    return ret
Create the named VMs, without using a profile Example: .. code-block:: python client.create(provider='my-ec2-config', names=['myinstance'], image='ami-1624987f', size='t1.micro', ssh_username='ec2-user', securitygroup='default', delvol_on_destroy=True)
def has_next_async(self):
    """Return a Future whose result says whether a next item is available."""
    if self._fut is None:
        self._fut = self._iter.getq()
    try:
        yield self._fut
    except EOFError:
        # Queue exhausted: no further items.
        raise tasklets.Return(False)
    raise tasklets.Return(True)
Return a Future whose result will say whether a next item is available. See the module docstring for the usage pattern.
def get_xy(self, yidx, xidx=0):
    """Return stored data for plotting.

    :param yidx: index or indices of y-axis variables
    :param xidx: index of the x-axis variable
    :return: (x data list, list of per-variable y data lists)
    """
    assert isinstance(xidx, int)
    indices = [yidx] if isinstance(yidx, int) else yidx
    t_vars = self.concat_t_vars()
    xdata = t_vars[:, xidx]
    ydata = t_vars[:, indices]
    return xdata.tolist(), ydata.transpose().tolist()
Return stored data for the given indices for plot :param yidx: the indices of the y-axis variables (1-indexing) :param xidx: the index of the x-axis variables :return: a tuple of (x-axis data list, list of y-axis data lists)
def target_timestamp(self) -> datetime:
    """Return the target state timestamp stored in the DB hash."""
    raw = DB.get_hash_value(self._key, 'target_timestamp')
    return datetime_from_isoformat(raw)
Get the target state timestamp.
def countries(self):
    """Lazily build and return the CountryList accessor."""
    if self._countries is None:
        self._countries = CountryList(self._version, )
    return self._countries
Access the countries :returns: twilio.rest.voice.v1.dialing_permissions.country.CountryList :rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryList
def get_energies(atoms_list):
    """Potential energy for a list of atoms objects.

    A single-element list yields a scalar; longer lists yield a list of
    energies. An empty list returns [] (the original fell through and
    returned None implicitly).
    """
    if len(atoms_list) == 1:
        return atoms_list[0].get_potential_energy()
    return [atoms.get_potential_energy() for atoms in atoms_list]
Potential energy for a list of atoms objects
def get_activity(self, activity_id, include_all_efforts=False):
    """Fetch the specified activity (detail-level if owned by the user).

    :param activity_id: the ID of the activity to fetch
    :param include_all_efforts: include segment efforts (owner only)
    :rtype: stravalib.model.Activity
    """
    raw = self.protocol.get('/activities/{id}', id=activity_id,
                            include_all_efforts=include_all_efforts)
    return model.Activity.deserialize(raw, bind_client=self)
Gets specified activity. Will be detail-level if owned by authenticated user; otherwise summary-level. http://strava.github.io/api/v3/activities/#get-details :param activity_id: The ID of activity to fetch. :type activity_id: int :param include_all_efforts: Whether to include segment efforts - only available to the owner of the activity. :type include_all_efforts: bool :rtype: :class:`stravalib.model.Activity`
def reorder(self, module=None):
    """Reorder the Activations in the Agenda.

    Uses the current module when *module* is None; call after changing
    the conflict resolution strategy.
    """
    handle = module._mdl if module is not None else ffi.NULL
    lib.EnvReorderAgenda(self._env, handle)
Reorder the Activations in the Agenda. If no Module is specified, the current one is used. To be called after changing the conflict resolution strategy.
def next_state(self, index, event_time, population_view):
    """Move simulants between states using this state's transition set.

    :param index: integer labels for the simulants
    :param event_time: when this transition occurs
    :param population_view: view of the simulation's internal state
    """
    return _next_state(index, event_time, self.transition_set,
                       population_view)
Moves a population between different states using information this state's `transition_set`. Parameters ---------- index : iterable of ints An iterable of integer labels for the simulants. event_time : pandas.Timestamp When this transition is occurring. population_view : vivarium.framework.population.PopulationView A view of the internal state of the simulation.
def init_properties(self) -> 'PygalleBaseClass':
    """Initialize the Pigalle property dictionaries.

    # Returns:
        PygalleBaseClass: The current instance.
    """
    self._pigalle = {
        PygalleBaseClass.__KEYS.INTERNALS: {},
        PygalleBaseClass.__KEYS.PUBLIC: {},
    }
    return self
Initialize the Pigalle properties. # Returns: PygalleBaseClass: The current instance.
def _element_to_bson(key, value, check_keys, opts):
    """Encode a single (key, value) pair to BSON bytes."""
    if not isinstance(key, string_type):
        raise InvalidDocument("documents must have only string keys, "
                              "key was %r" % (key,))
    if check_keys:
        # Reserved characters in user-supplied keys are rejected.
        if key.startswith("$"):
            raise InvalidDocument("key %r must not start with '$'" % (key,))
        if "." in key:
            raise InvalidDocument("key %r must not contain '.'" % (key,))
    return _name_value_to_bson(_make_name(key), value, check_keys, opts)
Encode a single key, value pair.
def process_from_web():
    """Download the TRRUST interaction table and return its processor.

    Returns a TrrustProcessor whose ``statements`` attribute holds the
    extracted INDRA Statements.
    """
    logger.info('Downloading table from %s' % trrust_human_url)
    res = requests.get(trrust_human_url)
    res.raise_for_status()
    df = pandas.read_table(io.StringIO(res.text))
    processor = TrrustProcessor(df)
    processor.extract_statements()
    return processor
Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute.
def remove_accounts_from_group(accounts_query, group):
    """Remove all non-deleted accounts in *accounts_query* from *group*."""
    active_accounts = accounts_query.filter(date_deleted__isnull=True)
    for account in active_accounts:
        remove_account_from_group(account, group)
Remove accounts from group.
def get_resource_name(context, expand_polymorphic_types=False):
    """Return the JSON:API resource name for the view in ``context``.

    Resolution order: error responses (4xx/5xx) -> 'errors'; an explicit
    ``view.resource_name`` attribute; the serializer's resource type
    (expanding polymorphic serializers when requested); the view's model;
    finally the view class name.
    """
    from rest_framework_json_api.serializers import PolymorphicModelSerializer
    view = context.get('view')
    if not view:
        # No view in context: cannot determine a resource name.
        return None
    # Error responses are always rendered under the 'errors' key.
    try:
        code = str(view.response.status_code)
    except (AttributeError, ValueError):
        pass
    else:
        if code.startswith('4') or code.startswith('5'):
            return 'errors'
    try:
        # An explicit attribute on the view wins over all inference.
        resource_name = getattr(view, 'resource_name')
    except AttributeError:
        try:
            serializer = view.get_serializer_class()
            if expand_polymorphic_types and issubclass(serializer, PolymorphicModelSerializer):
                # A polymorphic serializer maps to several resource types.
                return serializer.get_polymorphic_types()
            else:
                return get_resource_type_from_serializer(serializer)
        except AttributeError:
            try:
                resource_name = get_resource_type_from_model(view.model)
            except AttributeError:
                # Last resort: derive the name from the view class itself.
                resource_name = view.__class__.__name__
    if not isinstance(resource_name, six.string_types):
        # Non-string values (e.g. a list of types) are returned as-is.
        return resource_name
    resource_name = format_resource_type(resource_name)
    return resource_name
Return the name of a resource.
def overlap(xl1, yl1, nx1, ny1, xl2, yl2, nx2, ny2):
    """Return True if two rectangular windows intersect.

    Each window is given by its lower-left corner (xl, yl) and extent
    (nx, ny); windows that merely touch at an edge do not overlap.
    """
    x_overlap = xl2 < xl1 + nx1 and xl1 < xl2 + nx2
    y_overlap = yl2 < yl1 + ny1 and yl1 < yl2 + ny2
    return x_overlap and y_overlap
Determines whether two windows overlap
def get_random_theme(dark=True):
    """Return the path of a randomly chosen theme file.

    Parameters
    ----------
    dark : bool
        Pick from the dark themes (default) or the light ones.
    """
    themes = [theme.path for theme in list_themes(dark)]
    # random.choice picks one element directly instead of shuffling the
    # whole list just to read its first item (still IndexError on empty).
    return random.choice(themes)
Get a random theme file.
def create_api_ipv6(self):
    """Get an instance of the IPv6 API services facade."""
    return ApiIPv6(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap,
    )
Get an instance of Api IPv6 services facade.
def _status_change(id, new_status):
    """Update the status of a job and return the previous status.

    Loads the job payload for ``id`` from redis, swaps in ``new_status``,
    re-deposits the payload, and returns the old status.

    Parameters
    ----------
    id : str
        The job ID.
    new_status : str
        The status to set.

    Returns
    -------
    str
        The old status.
    """
    job_info = json.loads(r_client.get(id))
    previous = job_info['status']
    job_info['status'] = new_status
    _deposit_payload(job_info)
    return previous
Update the status of a job The status associated with the id is updated, an update command is issued to the job's pubsub, and the old status is returned. Parameters ---------- id : str The job ID new_status : str The new status to set Returns ------- str The old status
def __load_yml(self, stream):
    """Load a YAML stream into a Python object.

    :param stream: file-like object or string containing YAML
    :returns: the parsed document (usually a dict)
    :raises InvalidFormatError: if the stream is not valid YAML
    """
    try:
        return yaml.load(stream, Loader=yaml.SafeLoader)
    except yaml.YAMLError as e:
        # yaml.load raises yaml.YAMLError (not ValueError) on malformed
        # input; catching ValueError let parse errors escape unhandled.
        cause = "invalid yml format. %s" % str(e)
        raise InvalidFormatError(cause=cause)
Load yml stream into a dict object
def get_entry(journal_location, date):
    """Return the journal entry text for ``date``.

    Returns None when ``date`` is not a ``datetime.date`` or when no
    entry file exists for that day.
    """
    if not isinstance(date, datetime.date):
        return None
    path = build_journal_path(journal_location, date)
    try:
        with open(path, "r") as entry_file:
            return entry_file.read()
    except IOError:
        return None
Args: journal_location - path to the journal directory; date - date object. Returns: the entry text, or None if the entry doesn't exist or date is not a date object.
def _dfs_preorder(node, visited):
    """Yield nodes reachable from ``node`` in DFS pre-order.

    ``visited`` is a mutable set used to skip already-seen nodes.
    """
    if node in visited:
        return
    visited.add(node)
    yield node
    # Low branch first, then high, matching pre-order traversal.
    for child in (node.lo, node.hi):
        if child is not None:
            yield from _dfs_preorder(child, visited)
Iterate through nodes in DFS pre-order.
def make_het_matrix(fn):
    """Make a boolean matrix of samples by variants.

    A one indicates that the sample is heterozygous for that variant.

    Parameters
    ----------
    fn : str
        Path to VCF file.
    """
    vcf_df = vcf_as_df(fn)
    variant_ids = vcf_df.apply(lambda x: df_variant_id(x), axis=1)
    # Read a single record just to discover the sample names.
    # NOTE(review): reader.next() is the py2-style iterator protocol --
    # confirm the pinned pyvcf version still supports it.
    vcf_reader = pyvcf.Reader(open(fn, 'r'))
    record = vcf_reader.next()
    hets = pd.DataFrame(0, index=variant_ids, columns=[x.sample for x in record.samples])
    # Re-open the file and mark heterozygous calls record by record.
    vcf_reader = pyvcf.Reader(open(fn, 'r'))
    for record in vcf_reader:
        h = record.get_hets()
        i = record_variant_id(record)
        # NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc is
        # the modern equivalent -- confirm the pinned pandas version.
        hets.ix[i, [x.sample for x in h]] = 1
    return hets
Make boolean matrix of samples by variants. One indicates that the sample is heterozygous for that variant. Parameters ---------- fn : str Path to VCF file.
def _from_dict(cls, _dict):
    """Initialize a QueryResponse object from a json dictionary."""
    args = {}
    # Scalar fields are copied straight across when present.
    for key in ('matching_results', 'duplicates_removed', 'session_token'):
        if key in _dict:
            args[key] = _dict.get(key)
    # List fields are deserialized element by element.
    if 'results' in _dict:
        args['results'] = [QueryResult._from_dict(item)
                           for item in _dict.get('results')]
    if 'aggregations' in _dict:
        args['aggregations'] = [QueryAggregation._from_dict(item)
                                for item in _dict.get('aggregations')]
    if 'passages' in _dict:
        args['passages'] = [QueryPassages._from_dict(item)
                            for item in _dict.get('passages')]
    # Nested single object.
    if 'retrieval_details' in _dict:
        args['retrieval_details'] = RetrievalDetails._from_dict(
            _dict.get('retrieval_details'))
    return cls(**args)
Initialize a QueryResponse object from a json dictionary.
def get_revocation_key(self, user):
    """Build the string whose change revokes this user's tokens.

    Includes the password hash when ``invalidate_on_password_change`` is
    set, so changing the password revokes existing tokens. For one-time
    tokens it also includes the last-login datetime, so logging in
    revokes existing tokens.
    """
    parts = []
    if self.invalidate_on_password_change:
        parts.append(user.password)
    if self.one_time:
        parts.append(str(user.last_login))
    return ''.join(parts)
When the value returned by this method changes, this revocates tokens. It always includes the password so that changing the password revokes existing tokens. In addition, for one-time tokens, it also contains the last login datetime so that logging in revokes existing tokens.
def split_spec(spec, sep):
    """Split ``spec`` once from the right on ``sep``.

    Returns a ``(start, end)`` tuple of stripped parts; ``end`` is the
    empty string when ``sep`` does not occur in ``spec``.
    """
    head, found, tail = spec.rpartition(sep)
    if not found:
        # Separator absent: rpartition puts the whole string in ``tail``.
        return tail.strip(), ''
    return head.strip(), tail.strip()
Split a spec by separator and return stripped start and end parts.
def search_last_modified_unique_identities(db, after):
    """Look for uuids of unique identities modified on or after a date.

    :param db: database manager
    :param after: look for identities modified on or after this date
    :returns: a list of uuids of modified unique identities, ordered by uuid
    """
    with db.connect() as session:
        query = (session.query(UniqueIdentity.uuid)
                 .filter(UniqueIdentity.last_modified >= after)
                 .order_by(UniqueIdentity.uuid))
        return [row.uuid for row in query.all()]
Look for the uuids of unique identities modified on or after a given date. This function returns the uuids of unique identities modified on the given date or after it. The result is a list of uuids of unique identities. :param db: database manager :param after: look for identities modified on or after this date :returns: a list of uuids of unique identities modified
def setPrivates(self, fieldDict):
    """Set the private fields (self._id, self._rev, self._key, ...).

    Every name listed in ``self.privates`` is copied from ``fieldDict``,
    defaulting to None when absent. When ``_id`` is known, the document
    URL is (re)built from it.
    """
    for priv in self.privates:
        setattr(self, priv, fieldDict.get(priv))
    if self._id is not None:
        self.URL = "%s/%s" % (self.documentsURL, self._id)
Will set the self._id, self._rev and self._key fields, and rebuild self.URL when _id is set.
def set_label(self):
    """Set label markup, unless the field has no label or it is suppressed."""
    suppressed = not self.field.label or self.attrs.get("_no_label")
    if suppressed:
        return
    self.values["label"] = format_html(
        LABEL_TEMPLATE,
        self.field.html_name,
        mark_safe(self.field.label),
    )
Set label markup.
def make_mapping(self) -> None:
    """Replace this node with a new, empty YAML mapping.

    This works on the Node object passed to a yatiml_savorize() or
    yatiml_sweeten() function, but not on any of its attributes or
    items. To set an attribute to a complex value, build a yaml.Node
    for it and use set_attribute() with that.
    """
    start = StreamMark('generated node', 0, 0, 0)
    end = StreamMark('generated node', 0, 0, 0)
    self.yaml_node = yaml.MappingNode(
        'tag:yaml.org,2002:map', [], start, end)
Replaces the node with a new, empty mapping. Note that this will work on the Node object that is passed to \ a yatiml_savorize() or yatiml_sweeten() function, but not on \ any of its attributes or items. If you need to set an attribute \ to a complex value, build a yaml.Node representing it and use \ set_attribute with that.
def add_rule(self, ip_protocol, from_port, to_port, src_group_name,
             src_group_owner_id, cidr_ip):
    """Add a rule to the SecurityGroup object.

    Note that this method only changes the local version of the object.
    No information is sent to EC2.
    """
    rule = IPPermissions(self)
    rule.ip_protocol, rule.from_port, rule.to_port = (
        ip_protocol, from_port, to_port)
    self.rules.append(rule)
    rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)
Add a rule to the SecurityGroup object. Note that this method only changes the local version of the object. No information is sent to EC2.
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
    """List members for a chamber and Congress, OR a state (and district)."""
    check_chamber(chamber)
    kwargs.update(chamber=chamber, congress=congress)
    # Most specific path wins: district, then state, then whole chamber.
    if 'state' in kwargs and 'district' in kwargs:
        template = "members/{chamber}/{state}/{district}/current.json"
    elif 'state' in kwargs:
        template = "members/{chamber}/{state}/current.json"
    else:
        template = "{congress}/{chamber}/members.json"
    path = template.format(**kwargs)
    return self.fetch(path, parse=lambda r: r['results'])
Takes a chamber and Congress, OR state and district, returning a list of members
def data_received(self, data):
    """Handle incoming bytes from ``asyncio.Protocol``.

    Delivers data to the registered callback when one is set; otherwise
    buffers it for later consumption.
    """
    callback = self._on_data
    if callback:
        callback(data)
        return
    self._queued_data.append(data)
Used to signal `asyncio.Protocol` of incoming data.
def get_processor(self, entity_id, sp_config):
    """Instantiate the SP's configured processor, or a BaseProcessor.

    Logs and re-raises if the configured processor class cannot be
    found or initialized.
    """
    dotted_path = sp_config.get('processor', None)
    if not dotted_path:
        # No processor configured: fall back to the all-access default.
        return BaseProcessor(entity_id)
    try:
        return import_string(dotted_path)(entity_id)
    except Exception as e:
        logger.error("Failed to instantiate processor: {} - {}".format(dotted_path, e),
                     exc_info=True)
        raise
Instantiate user-specified processor or default to an all-access base processor. Raises an exception if the configured processor class can not be found or initialized.
def write(self):
    """Write configuration to chassis.

    :raises StreamWarningsError: if configuration warnings are found.
    """
    self.ix_command('write')
    stream_warnings = self.streamRegion.generateWarningList()
    # Warnings are joined on the chassis side with the 'LiStSeP' sentinel
    # and split back here into individual warning strings.
    # NOTE(review): generateWarningList() is invoked a second time below
    # purely as a truthiness guard -- confirm it is side-effect free.
    warnings_list = (self.api.call('join ' + ' {' + stream_warnings + '} ' + ' LiStSeP').split('LiStSeP')
                     if self.streamRegion.generateWarningList() else [])
    for warning in warnings_list:
        if warning:
            raise StreamWarningsError(warning)
Write configuration to chassis. Raise StreamWarningsError if configuration warnings found.
def permission_to_perm(permission):
    """Convert a Permission instance to an '<app_label>.<codename>' string.

    For example, the ``auth`` app's ``add_user`` permission becomes
    ``'auth.add_user'``.
    """
    return '%s.%s' % (permission.content_type.app_label,
                      permission.codename)
Convert a permission instance to a permission-string. Examples -------- >>> permission = Permission.objects.get( ... content_type__app_label='auth', ... codename='add_user', ... ) >>> permission_to_perm(permission) 'auth.add_user'
def remove(self, template):
    """Remove a template from the tribe and return the tribe.

    :type template: :class:`eqcorrscan.core.match_filter.Template`
    :param template: Template to remove from tribe.
    """
    kept = [t for t in self.templates if t != template]
    self.templates = kept
    return self
Remove a template from the tribe. :type template: :class:`eqcorrscan.core.match_filter.Template` :param template: Template to remove from tribe .. rubric:: Example >>> tribe = Tribe(templates=[Template(name='c'), Template(name='b'), ... Template(name='a')]) >>> tribe.remove(tribe.templates[0]) Tribe of 2 templates
def items(self):
    """Return dict-style items keyed by each dependency's task."""
    by_task = {}
    for dep, value in self._result.items():
        by_task[dep.task] = value
    return by_task.items()
Returns dictionary items
def connect(self, socket_or_address):
    """Connect to server and start serving registered functions.

    :type socket_or_address: tuple or socket object
    :arg socket_or_address: A ``(host, port)`` pair to be passed to
        `socket.create_connection`, or a socket object.
    """
    if isinstance(socket_or_address, tuple):
        # Function-scope import: only needed when we must open the
        # connection ourselves.
        import socket
        self.socket = socket.create_connection(socket_or_address)
    else:
        self.socket = socket_or_address

    # The handler mediates all RPC traffic over the socket.
    address = None
    self.handler = EPCClientHandler(self.socket, address, self)

    # Re-export the handler's call/method APIs on the client object.
    self.call = self.handler.call
    self.call_sync = self.handler.call_sync
    self.methods = self.handler.methods
    self.methods_sync = self.handler.methods_sync

    # Serve in a background thread; block until the handler is ready.
    self.handler_thread = newthread(self, target=self.handler.start)
    self.handler_thread.daemon = self.thread_daemon
    self.handler_thread.start()
    self.handler.wait_until_ready()
Connect to server and start serving registered functions. :type socket_or_address: tuple or socket object :arg socket_or_address: A ``(host, port)`` pair to be passed to `socket.create_connection`, or a socket object.
def register_agent(self, host, sweep_id=None, project_name=None):
    """Register a new agent.

    Args:
        host (str): hostname
        sweep_id (str): sweep to attach the agent to
        project_name (str): project that contains the sweep; defaults to
            the configured project setting.

    Returns:
        The created agent record from the mutation response.
    """
    # NOTE(review): the GraphQL mutation document is missing here --
    # gql() is called with no argument; restore the mutation text.
    mutation = gql(
    )
    if project_name is None:
        project_name = self.settings('project')

    def no_retry_400(e):
        # Retry everything except HTTP 400, which is surfaced to the
        # user as a UsageError built from the server's message.
        if not isinstance(e, requests.HTTPError):
            return True
        if e.response.status_code != 400:
            return True
        body = json.loads(e.response.content)
        raise UsageError(body['errors'][0]['message'])

    response = self.gql(mutation, variable_values={
        'host': host,
        'entityName': self.settings("entity"),
        'projectName': project_name,
        'sweep': sweep_id}, check_retry_fn=no_retry_400)
    return response['createAgent']['agent']
Register a new agent Args: host (str): hostname sweep_id (str): sweep to attach the agent to project_name (str): project that contains the sweep
def show_input(self, template_helper, language, seed):
    """Render the input widget for a displayable code problem."""
    translation = self._translations.get(language, gettext.NullTranslations())
    header = ParsableText(self.gettext(language, self._header), "rst",
                          translation=translation)
    renderer = DisplayableCodeProblem.get_renderer(template_helper)
    return str(renderer.tasks.code(self.get_id(), header, 8, 0,
                                   self._language, self._optional,
                                   self._default))
Show BasicCodeProblem and derivatives
def with_descriptor(self, descriptor):
    """Return the entities that carry the specified descriptor."""
    wanted = "%s_descriptor" % descriptor
    return {eid: ent for eid, ent in self.items() if wanted in ent}
Returns any entities with the specified descriptor
def read_stream(self, file: IO, data_stream: DataStream) -> Reply:
    """Read from the data stream.

    Args:
        file: A destination file object or a stream writer.
        data_stream: The stream of which to read from.

    Coroutine.

    Returns:
        Reply: The final reply.
    """
    # Drain the data connection into the destination file first...
    yield from data_stream.read_file(file=file)
    # ...then expect the control channel's end-of-transfer reply.
    reply = yield from self._control_stream.read_reply()
    self.raise_if_not_match(
        'End stream', ReplyCodes.closing_data_connection, reply
    )
    data_stream.close()
    return reply
Read from the data stream. Args: file: A destination file object or a stream writer. data_stream: The stream of which to read from. Coroutine. Returns: Reply: The final reply.
def add(self, name, definition):
    """Register a definition to the registry, expanding it first.

    Existing definitions with the same name are replaced silently.

    :param name: The name which can be used as reference in a
        validation schema.
    :type name: :class:`str`
    :param definition: The definition.
    :type definition: any :term:`mapping`
    """
    expanded = self._expand_definition(definition)
    self._storage[name] = expanded
Register a definition to the registry. Existing definitions are replaced silently. :param name: The name which can be used as reference in a validation schema. :type name: :class:`str` :param definition: The definition. :type definition: any :term:`mapping`
def pw_score_cosine(self, s1 : ClassId, s2 : ClassId) -> SimScore:
    """Cosine similarity of two subjects.

    Arguments
    ---------
    s1 : str
        class id
    s2 : str
        class id

    Return
    ------
    number
        A number between 0 and 1
    """
    frame = self.assoc_df
    vec1 = frame.loc[s1].values
    vec2 = frame.loc[s2].values
    # cosine() returns a distance; similarity is its complement.
    return 1 - cosine(vec1, vec2)
Cosine similarity of two subjects Arguments --------- s1 : str class id Return ------ number A number between 0 and 1