docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Encrypts a string using a given rsa.PublicKey object. If the message
is larger than the key, it will split it up into a list and encrypt
each line in the list.
Args:
message (string): The string to encrypt.
public_key (rsa.PublicKey): The key object used to encrypt the
... | def encrypt(self, message, public_key):
# Get the maximum message length based on the key
max_str_len = rsa.common.byte_size(public_key.n) - 11
# If the message is longer than the key size, split it into a list to
# be encrypted
if len(message) > max_str_len:
... | 879,697 |
Decrypts a string using our own private key object.
Args:
message (string): The string of the message to decrypt.
Returns:
The unencrypted string. | def decrypt(self, message):
# Unserialize the encrypted message
message = json.loads(message)
# Set up a list for the unencrypted lines of the message
unencrypted_msg = []
for line in message:
# Convert from ascii back to bytestring
enc_line =... | 879,698 |
Create dummies for the elements of a set-valued column. Operates in place.
Args:
df: data frame
columns: either a dictionary of column: values pairs or a collection of columns.
cast: whether or not to cast values to set
drop: whether or not to drop the binarized columns
TODO: mak... | def binarize_sets(df, columns, cast=False, drop=True, min_freq=None):
for column in columns:
d = df[column].dropna() # avoid nulls
if cast:
d = d.apply(set)
values = columns[column] if isinstance(columns, dict) else util.union(d)
for value in values:
na... | 879,770 |
Performs mean imputation on a pandas dataframe.
Args:
train: an optional training mask with which to compute the mean
value: instead of computing the mean, use this as the value argument to fillna
dropna: whether to drop all null columns
inplace: whether to perform the imputation inp... | def impute(X, value=None, train=None, dropna=True, inplace=True):
if value is None:
Xfit = X[train] if train is not None else X
value = Xfit.mean()
else:
if train is not None:
raise ValueError("Cannot pass both train and value arguments")
if dropna:
null_col... | 879,776 |
Copy source -> destination
Args:
source (str | None): Source file or folder
destination (str | None): Destination file or folder
ignore (callable | list | str | None): Names to be ignored
adapter (callable | None): Optional function to call on 'source' before copy
def copy(source, destination, ignore=None, adapter=None, fatal=True, logger=LOG.debug):
    """Copy source -> destination

    Args:
        source (str | None): Source file or folder
        destination (str | None): Destination file or folder
        ignore (callable | list | str | None): Names to be ignored
        adapter (callable | None): Optional function to call on 'source' before copy
        fatal (bool): presumably abort on failure when True -- TODO confirm against _file_op
        logger (callable | None): Logger callable to use (defaults to LOG.debug)

    Returns:
        Whatever `_file_op` returns for the underlying `_copy` operation.
    """
    # Delegate to the generic file-operation wrapper, passing the copy primitive.
    return _file_op(source, destination, _copy, adapter, fatal, logger, ignore=ignore)
Perform re.sub with the patterns in the given dict
Args:
dict_: {pattern: repl}
def substitute(dict_, source):
    """Perform re.sub with the patterns in the given dict.

    Every key of *dict_* is treated as a literal string (escaped before
    compilation) and replaced by its value in a single pass over *source*.

    Args:
        dict_: {pattern: repl} mapping of literal patterns to replacements.
        source: str to perform the substitutions on.

    Returns:
        str: *source* with every occurrence of each key replaced.
    """
    # Bug fix: an empty mapping used to compile to the empty pattern, which
    # matches at every position and then raises KeyError on dict_[''].
    if not dict_:
        return source
    # Escape the keys so they match literally, then join them into one
    # alternation so all substitutions happen in a single pass.
    d_esc = (re.escape(k) for k in dict_.keys())
    pattern = re.compile('|'.join(d_esc))
    return pattern.sub(lambda x: dict_[x.group()], source)
Processes messages that have been delivered from the listener.
Args:
data (dict): A dictionary containing the uuid, euuid, and message
response. E.g. {"cuuid": x, "euuid": y, "response": z}.
Returns:
None | def retransmit(self, data):
# If that shit is still in self.event_uuids, then that means we STILL
# haven't gotten a response from the client. Then we resend that shit
# and WAIT
if data["euuid"] in self.event_uuids:
# Increment the current retry count of the euuid
... | 880,135 |
Processes messages that have been delivered from the listener.
Args:
msg (string): The raw packet data delivered from the listener. This
data will be unserialized and then processed based on the packet's
method.
host (tuple): The (address, host) tuple of the source m... | def handle_message(self, msg, host):
response = None
# Unserialize the packet, and decrypt if the host has encryption enabled
if host in self.encrypted_hosts:
msg_data = unserialize_data(msg, self.compression, self.encryption)
else:
msg_data = unseriali... | 880,136 |
Processes messages that have been delivered by a registered client.
Args:
msg (string): The raw packet data delivered from the listener. This
data will be unserialized and then processed based on the packet's
method.
host (tuple): The (address, host) tuple of the sou... | def handle_message_registered(self, msg_data, host):
response = None
if msg_data["method"] == "EVENT":
logger.debug("<%s> <euuid:%s> Event message "
"received" % (msg_data["cuuid"], msg_data["euuid"]))
response = self.event(msg_data["cuuid"],
... | 880,137 |
This function simply returns the server version number as a response
to the client.
Args:
message (dict): A dictionary of the autodiscover message from the
client.
Returns:
A JSON string of the "OHAI Client" server response with the server's
version nu... | def autodiscover(self, message):
# Check to see if the client's version is the same as our own.
if message["version"] in self.allowed_versions:
logger.debug("<%s> Client version matches server "
"version." % message["cuuid"])
response = serialize... | 880,138 |
This function will check to see if a given host with client uuid is
currently registered.
Args:
cuuid (string): The client uuid that wishes to register.
host (tuple): The (address, port) tuple of the client that is
registering.
Returns:
def is_registered(self, cuuid, host):
    """Check whether the given client uuid is registered from *host*.

    Args:
        cuuid (string): The client uuid to look up.
        host (tuple): The (address, port) tuple of the client.

    Returns:
        bool: True when the uuid is in the registry with a matching host,
        False otherwise.
    """
    # Unknown uuid: definitely not registered.
    if cuuid not in self.registry:
        return False
    # Known uuid: registered only if it was registered from this exact host.
    return self.registry[cuuid]["host"] == host
Register the extension with Sphinx.
Args:
app: The Sphinx application. | def setup(app):
for name, (default, rebuild, _) in ref.CONFIG_VALUES.iteritems():
app.add_config_value(name, default, rebuild)
app.add_directive('javaimport', ref.JavarefImportDirective)
app.add_role('javaref', ref.JavarefRole(app))
app.connect('builder-inited', initialize_env)
app.c... | 880,205 |
Purge expired values from the environment.
When certain configuration values change, related values in the
environment must be cleared. While Sphinx can rebuild documents on
configuration changes, it does not notify extensions when this
happens. Instead, cache relevant values in the environment in orde... | def validate_env(app):
if not hasattr(app.env, 'javalink_config_cache'):
app.env.javalink_config_cache = {}
for conf_attr, (_, _, env_attr) in ref.CONFIG_VALUES.iteritems():
if not env_attr:
continue
value = getattr(app.config, conf_attr)
cached = app.env.java... | 880,206 |
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
def short(cls, path):
    """Return the short, human-friendly form of *path*.

    Strips any known anchor folder registered in ``cls.paths`` and collapses
    the home directory to '~'.

    Example:
        short("examined /Users/joe/foo") => "examined ~/foo"

    Args:
        path: Path to represent in its short form.

    Returns:
        (str): Short form, using '~' if applicable.
    """
    # Empty / None input is returned untouched.
    if not path:
        return path
    text = str(path)
    if cls.paths:
        # Drop every registered anchor folder prefix (with trailing slash).
        for anchor in cls.paths:
            if anchor:
                text = text.replace(anchor + "/", "")
    return text.replace(cls.home, "~")
cartesian product of dict whose values are lists
Args:
d: dictionary to take product of. multiple dictionaries will first
be merged by dict_merge
kwargs: additional kwargs for convenience
Returns:
a list of dictionaries with the same keys as d and kwargs | def dict_product(*d, **kwargs):
d = dict(dict_merge(*d), **kwargs)
holdout = {k: d[k] for k in d if not isinstance(d[k], list)}
d = {k: d[k] for k in d if k not in holdout}
items = d.items()
if len(items) == 0:
dicts = [{}]
else:
keys, values = zip(*items)
dicts = [... | 880,304 |
Indent all new lines
Args:
n_spaces: number of spaces to use for indentation
def indent(s, n_spaces=2, initial=True):
    """Indent all new lines of *s*.

    Args:
        n_spaces: number of spaces to use for indentation
        initial: whether or not to start with an indent

    Returns:
        str: the indented text.
    """
    prefix = ' ' * n_spaces
    # Re-join the lines with the prefix inserted after every newline.
    indented = ('\n' + prefix).join(s.split('\n'))
    return prefix + indented if initial else indented
Create a new target representing a task and its parameters
Args:
task: Task instance to create target for; the task class has to inherit
from :class:`ozelot.tasks.TaskBase`.
Returns:
def from_task(cls, task):
    """Create a new target representing a task and its parameters.

    Args:
        task: Task instance to create target for; the task class has to
            inherit from :class:`ozelot.tasks.TaskBase`.

    Returns:
        ozelot.tasks.ORMTarget: a new target instance.
    """
    # A target is identified by the task's name plus its parameter string.
    task_name = task.get_name()
    task_params = task.get_param_string()
    return cls(name=task_name, params=task_params)
Base query for a target.
Args:
def _base_query(self, session):
    """Base query for a target.

    Matches marker rows on both this target's name and parameter string.

    Args:
        session: database session to query in.
    """
    query = session.query(ORMTargetMarker)
    query = query.filter(ORMTargetMarker.name == self.name)
    return query.filter(ORMTargetMarker.params == self.params)
Store entities and their attributes
Args:
df (pandas.DataFrame): data to store (storing appends 'id' and 'type' columns!)
attribute_columns (list(str)): list of column labels that define attributes | def store(self, df, attribute_columns):
# ID start values depend on currently stored entities/attributes!
entity_id_start = models.Entity.get_max_id(self.session) + 1
attribute_id_start = models.Attribute.get_max_id(self.session) + 1
# append ID and type columns
df['id... | 880,326 |
Starts the listen loop. If threading is enabled, then the loop will
be started in its own thread.
Args:
None
Returns:
None | def listen(self):
self.listening = True
if self.threading:
from threading import Thread
self.listen_thread = Thread(target=self.listen_loop)
self.listen_thread.daemon = True
self.listen_thread.start()
self.scheduler_thread = Thread(t... | 880,335 |
Starts the listen loop and executes the receieve_datagram method
whenever a packet is receieved.
Args:
None
Returns:
None | def listen_loop(self):
while self.listening:
try:
data, address = self.sock.recvfrom(self.bufsize)
self.receive_datagram(data, address)
if self.stats_enabled:
self.stats['bytes_recieved'] += len(data)
except so... | 880,336 |
Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 secon... | def scheduler(self, sleep_time=0.2):
while self.listening:
# If we have any scheduled calls, execute them and remove them from
# our list of scheduled calls.
if self.scheduled_calls:
timestamp = time.time()
self.scheduled_calls[:] = [... | 880,337 |
Executes when UDP data has been received and sends the packet data
to our app to process the request.
Args:
data (str): The raw serialized packet data received.
address (tuple): The address and port of the origin of the received
packet. E.g. (address, port).
Ret... | def receive_datagram(self, data, address):
# If we do not specify an application, just print the data.
if not self.app:
logger.debug("Packet received", address, data)
return False
# Send the data we've recieved from the network and send it
# to our appl... | 880,341 |
Generate analysis output as html page
Args:
query_module (module): module to use for querying data for the
desired model/pipeline variant, e.g. leonardo.standard.queries | def plots_html_page(query_module):
# page template
template = jenv.get_template("analysis.html")
# container for template context
context = dict(extended=config.EXTENDED)
# a database client/session to run queries in
cl = client.get_client()
session = cl.create_session()
# gener... | 880,372 |
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes
Args:
value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS
default_unit (str | unicode | None): Default unit to use for unqualified values
base (int): Base to use (usually 102... | def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):
if isinstance(value, (int, float)):
return unitized(value, default_unit, base)
if value is None:
return None
try:
if value[-1].lower() == "b":
# Accept notations such as "1mb", as they get used out of ha... | 880,631 |
BGEN file reader.
Args:
filename (str): The name of the BGEN file.
sample_filename (str): The name of the sample file (optional).
probability_threshold (float): The probability threshold. | def __init__(self, filename, sample_filename=None, chromosome=None,
probability_threshold=0.9, cpus=1):
# The BGEN reader (parallel or no)
if cpus == 1:
self.is_parallel = False
self._bgen = PyBGEN(filename, prob_t=probability_threshold)
else:
... | 880,774 |
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes. | def get_variant_genotypes(self, variant):
# The chromosome to search for (if a general one is set, that's the one
# we need to search for)
chrom = variant.chrom.name
if self.chrom is not None and chrom == self.chrom:
chrom = "NA"
# Getting the results
... | 880,775 |
Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction. | def iter_variants_by_names(self, names):
if not self.is_parallel:
yield from super().iter_variants_by_names(names)
else:
for info, dosage in self._bgen.iter_variants_by_names(names):
yield Genotypes(
Variant(info.name,
... | 880,779 |
Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes. | def get_variant_by_name(self, name):
results = []
try:
for info, dosage in self._bgen.get_variant(name):
results.append(Genotypes(
Variant(
info.name,
CHROM_STR_ENCODE.get(info.chrom, info.chrom),
... | 880,780 |
Iterates over the genotypes for variants using a list of names.
Args:
def iter_variants_by_names(self, names):
    """Iterates over the genotypes for variants using a list of names.

    Args:
        names (list): The list of names for variant extraction.
    """
    # Delegate the per-variant lookup and flatten the results lazily.
    for variant_name in names:
        yield from self.get_variant_by_name(variant_name)
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
c... | def avg_grads(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent ... | 881,183 |
Process a string of content for include tags.
This function assumes there are no blocks in the content. The content is split into segments,
with include tags being replaced by Block objects.
PARAMETERS:
content -- str; content to be converted into a Block.
block_map -- BlockMap
link_stac... | def process_links(include_match, block_map, link_stack, source_path):
leading_whitespace = include_match.group(1)
include_path = include_match.group(2)
# Optional block name. If match is None, block name was ommitted (default to 'all').
block_name = include_match.group(3)
if block_name is not ... | 881,310 |
IMPUTE2 file reader.
Args:
filename (str): The name of the IMPUTE2 file.
sample_filename (str): The name of the SAMPLE file.
probability_threshold (float): The probability threshold.
Note
====
If the sample IDs are not unique, the index is change... | def __init__(self, filename, sample_filename, probability_threshold=0.9):
# Reading the samples
self.samples = pd.read_csv(sample_filename, sep=" ", skiprows=2,
names=["fid", "iid", "missing", "father",
"mother", "sex"... | 881,404 |
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes. | def get_variant_genotypes(self, variant):
if not self.has_index:
raise NotImplementedError("Not implemented when IMPUTE2 file is "
"not indexed (see genipe)")
# Find the variant in the index
try:
impute2_chrom = CHROM_STR_TO... | 881,405 |
Parses the current IMPUTE2 line (a single variant).
Args:
line (str): An IMPUTE2 line.
Returns:
Genotypes: The genotype in dosage format.
Warning
=======
By default, the genotypes object has multiallelic set to False. | def _parse_impute2_line(self, line):
# Splitting
row = line.rstrip("\r\n").split(" ")
# Constructing the probabilities
prob = np.array(row[5:], dtype=float)
prob.shape = (prob.shape[0] // 3, 3)
# Constructing the dosage
dosage = 2 * prob[:, 2] + prob[:,... | 881,413 |
get ore:aggregates for this resource, optionally retrieving resource payload
Args:
def get_related(self):
    """get ore:aggregates for this resource

    Returns:
        list: URIs parsed from ore:aggregates triples; empty when the
        resource does not exist or carries no such triples.
    """
    # Guard clause: nothing to report unless the resource exists and
    # actually carries ore:aggregates triples.
    if not self.exists:
        return []
    triples = self.rdf.triples
    if not hasattr(triples, 'ore') or not hasattr(triples.ore, 'aggregates'):
        return []
    return [self.repo.parse_uri(uri) for uri in triples.ore.aggregates]
get pcdm:hasMember for this resource
Args:
def get_members(self, retrieve=False):
    """get pcdm:hasMember for this resource

    Args:
        retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload

    Returns:
        list: URIs parsed from pcdm:hasMember triples; empty otherwise.
    """
    # Guard clause: only resources that exist can have members.
    if not self.exists:
        return []
    triples = self.rdf.triples
    if not hasattr(triples, 'pcdm') or not hasattr(triples.pcdm, 'hasMember'):
        return []
    # NOTE(review): `retrieve` is accepted but unused here -- confirm intent.
    return [self.repo.parse_uri(uri) for uri in triples.pcdm.hasMember]
get pcdm:hasFile for this resource
Args:
def get_files(self, retrieve=False):
    """get pcdm:hasFile for this resource

    Args:
        retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload

    Returns:
        list: URIs parsed from pcdm:hasFile triples; empty otherwise.
    """
    # Guard clause: only resources that exist can have files.
    if not self.exists:
        return []
    triples = self.rdf.triples
    if not hasattr(triples, 'pcdm') or not hasattr(triples.pcdm, 'hasFile'):
        return []
    # NOTE(review): `retrieve` is accepted but unused here -- confirm intent.
    return [self.repo.parse_uri(uri) for uri in triples.pcdm.hasFile]
get pcdm:hasRelatedFile for this resource
Args:
def get_associated(self, retrieve=False):
    """get pcdm:hasRelatedFile for this resource

    Args:
        retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload

    Returns:
        list: URIs parsed from pcdm:hasRelatedFile triples; empty otherwise.
    """
    # Guard clause: only resources that exist can have related files.
    if not self.exists:
        return []
    triples = self.rdf.triples
    if not hasattr(triples, 'pcdm') or not hasattr(triples.pcdm, 'hasRelatedFile'):
        return []
    # NOTE(review): `retrieve` is accepted but unused here -- confirm intent.
    return [self.repo.parse_uri(uri) for uri in triples.pcdm.hasRelatedFile]
Binary plink file reader.
Args:
prefix (str): the prefix of the Plink binary files. | def __init__(self, prefix):
self.bed = PyPlink(prefix)
self.bim = self.bed.get_bim()
self.fam = self.bed.get_fam()
# Identify all multi-allelics.
self.bim["multiallelic"] = False
self.bim.loc[
self.bim.duplicated(["chrom", "pos"], keep=False),
... | 881,431 |
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes.
Note
====
If the sampl... | def get_variant_genotypes(self, variant):
# Find the variant in the bim.
try:
plink_chrom = CHROM_STR_TO_INT[variant.chrom.name]
except KeyError:
raise ValueError(
"Invalid chromosome ('{}') for Plink.".format(variant.chrom)
)
... | 881,432 |
Get the genotype of a marker using it's name.
Args:
name (str): The name of the marker.
Returns:
list: A list of Genotypes (only one for PyPlink, see note below).
Note
====
From PyPlink version 1.3.2 and onwards, each name is unique in the
... | def get_variant_by_name(self, name):
# From 1.3.2 onwards, PyPlink sets unique names.
# Getting the genotypes
try:
geno, i = self.bed.get_geno_marker(name, return_index=True)
except ValueError:
if name in self.bed.get_duplicated_markers():
... | 881,438 |
Compute LD between a marker and a list of markers.
Args:
cur_geno (Genotypes): The genotypes of the marker.
other_genotypes (list): A list of genotypes.
Returns:
numpy.array: An array containing the r or r**2 values between cur_geno
and other_genotypes.
Note:
... | def compute_ld(cur_geno, other_genotypes, r2=False):
# Normalizing the current genotypes
norm_cur = normalize_genotypes(cur_geno)
# Normalizing and creating the matrix for the other genotypes
norm_others = np.stack(
tuple(normalize_genotypes(g) for g in other_genotypes),
axis=1,
... | 881,475 |
Normalize the genotypes.
Args:
genotypes (Genotypes): The genotypes to normalize.
Returns:
def normalize_genotypes(genotypes):
    """Normalize the genotypes.

    Centers the genotype vector on its nan-aware mean and scales it by its
    nan-aware standard deviation.

    Args:
        genotypes (Genotypes): The genotypes to normalize.

    Returns:
        numpy.array: The normalized genotypes.
    """
    values = genotypes.genotypes
    mean = np.nanmean(values)
    std = np.nanstd(values)
    return (values - mean) / std
Infer the antibiotics resistance of the given record.
Arguments:
record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.
Raises:
RuntimeError: when there's not exactly one resistance cassette. | def find_resistance(record):
for feature in record.features:
labels = set(feature.qualifiers.get("label", []))
cassettes = labels.intersection(_ANTIBIOTICS)
if len(cassettes) > 1:
raise RuntimeError("multiple resistance cassettes detected")
elif len(cassettes) == 1:
... | 881,559 |
Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index. | def generate_index(fn, cols=None, names=None, sep=" "):
# Some assertions
assert cols is not None, "'cols' was not set"
assert names is not None, "'names' was not set"
assert len(cols) == len(names)
# Getting the open function
bgzip, open_func = get_open_func(fn, return_fmt=True)
# Re... | 881,677 |
Get the opening function.
Args:
fn (str): the name of the file.
return_fmt (bool): if the file format needs to be returned.
Returns:
tuple: either a tuple containing two elements: a boolean telling if the
format is bgzip, and the opening function. | def get_open_func(fn, return_fmt=False):
# The file might be compressed using bgzip
bgzip = None
with open(fn, "rb") as i_file:
bgzip = i_file.read(3) == b"\x1f\x8b\x08"
if bgzip and not HAS_BIOPYTHON:
raise ValueError("needs BioPython to index a bgzip file")
open_func = open
... | 881,678 |
Restores the index for a given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
... | def get_index(fn, cols, names, sep):
if not has_index(fn):
# The index doesn't exists, generate it
return generate_index(fn, cols, names, sep)
# Retrieving the index
file_index = read_index(get_index_fn(fn))
# Checking the names are there
if len(set(names) - (set(file_index.co... | 881,679 |
Writes the index to file.
Args:
fn (str): the name of the file that will contain the index.
def write_index(fn, index):
    """Writes the index to file.

    The file starts with the magic check string, followed by the
    zlib-compressed CSV serialization of the index.

    Args:
        fn (str): the name of the file that will contain the index.
        index (pandas.DataFrame): the index.
    """
    csv_payload = index.to_csv(None, index=False, encoding="utf-8")
    compressed = zlib.compress(bytes(csv_payload, encoding="utf-8"))
    with open(fn, "wb") as output_file:
        output_file.write(_CHECK_STRING)
        output_file.write(compressed)
Reads index from file.
Args:
fn (str): the name of the file containing the index.
Returns:
pandas.DataFrame: the index of the file.
Before reading the index, we check the first couple of bytes to see if it
is a valid index file. | def read_index(fn):
index = None
with open(fn, "rb") as i_file:
if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
raise ValueError("{}: not a valid index file".format(fn))
index = pd.read_csv(io.StringIO(
zlib.decompress(i_file.read()).decode(encoding="utf-8"),
... | 881,681 |
Indexes an IMPUTE2 file.
Args:
def index_impute2(fn):
    """Indexes an IMPUTE2 file.

    Args:
        fn (str): The name of the IMPUTE2 file.
    """
    logger.info("Indexing {} (IMPUTE2)".format(fn))
    # Keep chromosome, name and position (columns 0, 1 and 2) in the index;
    # IMPUTE2 files are space-separated.
    impute2_index(
        fn,
        cols=[0, 1, 2],
        names=["chrom", "name", "pos"],
        sep=" ",
    )
    logger.info("Index generated")
Indexes a BGEN file.
Args:
fn (str): The name of the BGEN file. | def index_bgen(fn, legacy=False):
logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
fn, " (legacy mode)" if legacy else "",
))
command = ["bgenix", "-g", fn, "-index"]
if legacy:
command.append("-with-rowid")
try:
logger.info("Executing '{}'".format(" ".join(comm... | 881,696 |
Returns the default action fluents regardless of the current `state` and `timestep`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
timestep (tf.Tensor): The current timestep.
Returns:
def __call__(self,
             state: Sequence[tf.Tensor],
             timestep: tf.Tensor) -> Sequence[tf.Tensor]:
    """Returns the default action fluents regardless of the current
    `state` and `timestep`.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.
        timestep (tf.Tensor): The current timestep.

    Returns:
        Sequence[tf.Tensor]: A tuple of action fluents.
    """
    # This default policy ignores its inputs entirely.
    return self._default
Render the simulated state-action `trajectories` for Navigation domain.
Args:
stats: Performance statistics.
trajectories: NonFluents, states, actions, interms and rewards.
batch: Number of batches to render. | def render(self,
trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
batch: Optional[int] = None) -> None:
non_fluents, initial_state, states, actions, interms, rewards = trajectories
non_fluents = dict(non_fluents)
states = dict((name, fluen... | 882,086 |
Prints the first batch of simulated `trajectories`.
Args:
trajectories: NonFluents, states, actions, interms and rewards. | def _render_trajectories(self,
trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None:
if self._verbose:
non_fluents, initial_state, states, actions, interms, rewards = trajectories
shape = states[0][1].shape
batch_size, horizon, = ... | 882,129 |
Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
for given `horizon`.
Args:
states (Sequence[Tuple[str, np.array]]): A state trajectory.
actions (Sequence[Tuple[str, np.array]]): An action trajectory.
interms (Sequence[Tuple[str, np.array]]): An int... | def _render_batch(self,
non_fluents: NonFluents,
states: Fluents, actions: Fluents, interms: Fluents,
rewards: np.array,
horizon: Optional[int] = None) -> None:
if horizon is None:
horizon = len(states[0][1])
self._render_round_ini... | 882,130 |
Prints fluents and rewards for the given timestep `t`.
Args:
t (int): timestep
s (Sequence[Tuple[str], np.array]: State fluents.
a (Sequence[Tuple[str], np.array]: Action fluents.
f (Sequence[Tuple[str], np.array]: Interm state fluents.
r (np.float32)... | def _render_timestep(self,
t: int,
s: Fluents, a: Fluents, f: Fluents,
r: np.float32) -> None:
print("============================")
print("TIME = {}".format(t))
print("============================")
fluent_variables = self._compiler.rddl.acti... | 882,131 |
Prints `fluents` of given `fluent_type` as list of instantiated variables
with corresponding values.
Args:
fluent_type (str): Fluent type.
fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values).
fluent_variables (Sequence[Tuple[str, List... | def _render_fluent_timestep(self,
fluent_type: str,
fluents: Sequence[Tuple[str, np.array]],
fluent_variables: Sequence[Tuple[str, List[str]]]) -> None:
for fluent_pair, variable_list in zip(fluents, fluent_variables):
name, fluent = fluent_pair
... | 882,132 |
Returns action fluents for the current `state` and `timestep`.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
timestep (tf.Tensor): The current timestep.
Returns:
def __call__(self,
             state: Sequence[tf.Tensor],
             timestep: tf.Tensor) -> Sequence[tf.Tensor]:
    """Returns action fluents for the current `state` and `timestep`.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.
        timestep (tf.Tensor): The current timestep.

    Returns:
        Sequence[tf.Tensor]: A tuple of action fluents.

    Raises:
        NotImplementedError: always; subclasses must override this method.
    """
    raise NotImplementedError
Renders the simulated `trajectories` for the given `batch`.
Args:
trajectories: NonFluents, states, actions, interms and rewards.
def render(self,
           trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
           batch: Optional[int] = None) -> None:
    """Renders the simulated `trajectories` for the given `batch`.

    Args:
        trajectories: NonFluents, states, actions, interms and rewards.
        batch: Number of batches to render.

    Raises:
        NotImplementedError: always; concrete visualizers must override.
    """
    raise NotImplementedError
Reads genotypes from a pandas DataFrame.
Args:
dataframe (pandas.DataFrame): The data.
map_info (pandas.DataFrame): The mapping information.
Note
====
The index of the dataframe should be the sample IDs. The index of
def __init__(self, dataframe, map_info):
    """Reads genotypes from a pandas DataFrame.

    Args:
        dataframe (pandas.DataFrame): The data.
        map_info (pandas.DataFrame): The mapping information.

    Note
    ====
    The index of the dataframe should be the sample IDs. The index of
    the map_info should be the variant names.
    """
    # Raw genotype data, indexed by sample ID.
    self.df = dataframe
    # Variant mapping information, indexed by variant name.
    self.map_info = map_info
Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions. | def get_variant_by_name(self, name):
try:
geno = self.df.loc[:, name].values
info = self.map_info.loc[name, :]
except KeyError:
# The variant is not in the data, so we return an empty
# list
logging.variant_name_not_found(name)
... | 882,258 |
Hashes HTML block tags.
PARAMETERS:
text -- str; Markdown text
hashes -- dict; a dictionary of all hashes, where keys are hashes
and values are their unhashed versions.
When HTML block tags are used, all content inside the tags is
def hash_blocks(text, hashes):
    """Hashes HTML block tags.

    PARAMETERS:
    text   -- str; Markdown text
    hashes -- dict; a dictionary of all hashes, where keys are hashes
              and values are their unhashed versions.

    Each HTML block is swapped for a unique hash and recorded in *hashes*
    so later Markdown passes leave its contents untouched and the block
    can be restored afterwards.
    """
    def replace(match):
        # Stash the raw block under a unique hash for later restoration.
        raw_block = match.group(1)
        key = hash_text(raw_block, 'block')
        hashes[key] = raw_block
        return '\n\n' + key + '\n\n'
    return re_block.sub(replace, text)
parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef
Args:
uri (rdflib.term.URIRef,str): input URI
Returns:
rdflib.term.URIRef | def parse_uri(self, uri=None):
# no uri provided, assume root
if not uri:
return rdflib.term.URIRef(self.root)
# string uri provided
elif type(uri) == str:
# assume "short" uri, expand with repo root
if type(uri) == str and not uri.startswith('http'):
return rdflib.term.URIRef("%s%s" % (self... | 882,679 |
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type... | def create_resource(self, resource_type=None, uri=None):
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | 882,680 |
Request new transaction from repository, init new Transaction,
store in self.txns
Args:
txn_name (str): human name for transaction
Return:
(Transaction): returns intance of newly created transaction | def start_txn(self, txn_name=None):
# if no name provided, create one
if not txn_name:
txn_name = uuid.uuid4().hex
# request new transaction
txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None)
# if 201, transaction was created
if txn_response.status_code =... | 882,682 |
Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_prefix (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human na... | def get_txn(self, txn_name, txn_uri):
# parse uri
txn_uri = self.parse_uri(txn_uri)
# request new transaction
txn_response = self.api.http_request('GET',txn_uri, data=None, headers=None)
# if 200, transaction exists
if txn_response.status_code == 200:
logger.debug("transactoin found: %s" % txn_uri... | 882,683 |
Keep current transaction alive, updates self.expires
Args:
None
Return:
None: sets new self.expires | def keep_alive(self):
# keep transaction alive
txn_response = self.api.http_request('POST','%sfcr:tx' % self.root, data=None, headers=None)
# if 204, transaction kept alive
if txn_response.status_code == 204:
logger.debug("continuing transaction: %s" % self.root)
# update status and timer
self.ac... | 882,685 |
Ends transaction by committing, or rolling back, all changes during transaction.
Args:
close_type (str): expects "commit" or "rollback"
Return:
(bool) | def _close(self, close_type):
# commit transaction
txn_response = self.api.http_request('POST','%sfcr:tx/fcr:%s' % (self.root, close_type), data=None, headers=None)
# if 204, transaction was closed
if txn_response.status_code == 204:
logger.debug("%s for transaction: %s, successful" % (close_type, self... | 882,686 |
parse resource type from self.http_request()
Note: uses isinstance() as plugins may extend these base LDP resource type.
Args:
response (requests.models.Response): response object
Returns:
[NonRDFSource, BasicContainer, DirectContainer, IndirectContainer] | def parse_resource_type(self, response):
# parse 'Link' header
links = [
link.split(";")[0].lstrip('<').rstrip('>')
for link in response.headers['Link'].split(', ')
if link.startswith('<http://www.w3.org/ns/ldp#')]
# parse resource type string with self.repo.namespace_manager.compute_qname()
ldp_... | 882,688 |
small function to parse RDF payloads from various repository endpoints
Args:
data (response.data): data from requests response
headers (response.headers): headers from requests response
Returns:
(rdflib.Graph): parsed graph | def parse_rdf_payload(self, data, headers):
# handle edge case for content-types not recognized by rdflib parser
if headers['Content-Type'].startswith('text/plain'):
logger.debug('text/plain Content-Type detected, using application/n-triples for parser')
parse_format = 'application/n-triples'
else:
... | 882,689 |
Small method to loop through three graphs in self.diffs, identify unique namespace URIs.
Then, loop through provided dictionary of prefixes and pin one to another.
Args:
None: uses self.prefixes and self.diffs
Returns:
None: sets self.update_namespaces and self.update_prefixes | def _derive_namespaces(self):
# iterate through graphs and get unique namespace uris
for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]:
for s,p,o in graph:
try:
ns_prefix, ns_uri, predicate = graph.compute_qname(p) # predicates
self.update_namespaces.add(ns_uri)
excep... | 882,691 |
Using the three graphs derived from self._diff_graph(), build a sparql update query in the format:
PREFIX foo: <http://foo.com>
PREFIX bar: <http://bar.com>
DELETE {...}
INSERT {...}
WHERE {...}
Args:
None: uses variables from self
Returns:
(str) sparql update query as string | def build_query(self):
# derive namespaces to include prefixes in Sparql update query
self._derive_namespaces()
sparql_query = ''
# add prefixes
for ns_prefix, ns_uri in self.update_prefixes.items():
sparql_query += "PREFIX %s: <%s>\n" % (ns_prefix, str(ns_uri))
# deletes
removed_serialized = s... | 882,692 |
Handles response from self.create()
Args:
response (requests.models.Response): response object from self.create()
ignore_tombstone (bool): If True, will attempt creation, if tombstone exists (409), will delete tombstone and retry | def _handle_create(self, response, ignore_tombstone, auto_refresh):
# 201, success, refresh
if response.status_code == 201:
# if not specifying uri, capture from response and append to object
self.uri = self.repo.parse_uri(response.text)
# creation successful
if auto_refresh:
self.refresh()
... | 882,696 |
Small method to return headers of an OPTIONS request to self.uri
Args:
None
Return:
(dict) response headers from OPTIONS request | def options(self):
# http request
response = self.repo.api.http_request('OPTIONS', self.uri)
return response.headers | 882,697 |
Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to move resource
Returns:
(Resource) new, moved instance of resource | def copy(self, destination):
# set move headers
destination_uri = self.repo.parse_uri(destination)
# http request
response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
# handle response
if response.status_code == 201:
return destinat... | 882,699 |
Method to delete resources.
Args:
remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.
Returns:
(bool) | def delete(self, remove_tombstone=True):
response = self.repo.api.http_request('DELETE', self.uri)
# update exists
if response.status_code == 204:
# removal successful, updating self
self._empty_resource_attributes()
if remove_tombstone:
self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % ... | 882,700 |
Performs GET request and refreshes RDF information for resource.
Args:
None
Returns:
None | def refresh(self, refresh_binary=True):
updated_self = self.repo.get_resource(self.uri)
# if resource type of updated_self != self, raise exception
if not isinstance(self, type(updated_self)):
raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self))... | 882,701 |
Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph
Args:
data (): payload from GET request, expected RDF content in various serialization formats
Returns:
None | def _build_rdf(self, data=None):
# recreate rdf data
self.rdf = SimpleNamespace()
self.rdf.data = data
self.rdf.prefixes = SimpleNamespace()
self.rdf.uris = SimpleNamespace()
# populate prefixes
for prefix,uri in self.repo.context.items():
setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))
... | 882,702 |
use Content-Type from headers to determine parsing method
Args:
None
Return:
None: sets self.rdf by parsing data from GET request, or setting blank graph of resource does not yet exist | def _parse_graph(self):
# if resource exists, parse self.rdf.data
if self.exists:
self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)
# else, create empty graph
else:
self.rdf.graph = rdflib.Graph()
# bind any additional namespaces from repo instance, but do not override... | 882,703 |
method to parse triples from self.rdf.graph for object-like
access
Args:
None
Returns:
None: sets self.rdf.triples | def parse_object_like_triples(self):
# parse triples as object-like attributes in self.rdf.triples
self.rdf.triples = SimpleNamespace() # prepare triples
for s,p,o in self.rdf.graph:
# get ns info
ns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)
# if prefix as list not yet added, add... | 882,704 |
small method to empty values if resource is removed or absent
Args:
None
Return:
None: empties selected resource attributes | def _empty_resource_attributes(self):
self.status_code = 404
self.headers = {}
self.exists = False
# build RDF
self.rdf = self._build_rdf()
# if NonRDF, empty binary data
if type(self) == NonRDFSource:
self.binary.empty() | 882,707 |
Method to handle possible values passed for adding, removing, modifying triples.
Detects type of input and sets appropriate http://www.w3.org/2001/XMLSchema# datatype
Args:
object_input (str,int,datetime,): many possible inputs
Returns:
(rdflib.term.Literal): with appropriate datatype attribute | def _handle_object(self, object_input):
# if object is string, convert to rdflib.term.Literal with appropriate datatype
if type(object_input) == str:
return rdflib.term.Literal(object_input, datatype=rdflib.XSD.string)
# integer
elif type(object_input) == int:
return rdflib.term.Literal(object_input... | 882,708 |
add triple by providing p,o, assumes s = subject
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: adds triple to self.rdf.graph | def add_triple(self, p, o, auto_refresh=True):
self.rdf.graph.add((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | 882,709 |
Assuming the predicate or object matches a single triple, sets the other for that triple.
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: modifies pre-existing triple in self.rdf.graph | def set_triple(self, p, o, auto_refresh=True):
self.rdf.graph.set((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | 882,710 |
remove triple by supplying p,o
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: removes triple from self.rdf.graph | def remove_triple(self, p, o, auto_refresh=True):
self.rdf.graph.remove((self.uri, p, self._handle_object(o)))
# determine if triples refreshed
self._handle_triple_refresh(auto_refresh) | 882,711 |
method to return hierarchical children of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of return URI only
Returns:
(list): list of resources | def children(self, as_resources=False):
children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving children as resources')
children = [ self.repo.get_resource(c... | 882,714 |
method to return hierarchical parents of this resource
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of return URI only
Returns:
(list): list of resources | def parents(self, as_resources=False):
parents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving parent as resource')
parents = [ self.repo.get_resource(par... | 882,715 |
method to return hierarchical siblings of this resource.
Args:
as_resources (bool): if True, opens each as appropriate resource type instead of return URI only
Returns:
(list): list of resources | def siblings(self, as_resources=False):
siblings = set()
# loop through parents and get children
for parent in self.parents(as_resources=True):
for sibling in parent.children(as_resources=as_resources):
siblings.add(sibling)
# remove self
if as_resources:
siblings.remove(self)
if not as_res... | 882,716 |
method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might require self.update() to update.
Args:
version_label (str): label to be used for version
Returns:
(ResourceVe... | def create_version(self, version_label):
# create version
version_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})
# if 201, assume success
if version_response.status_code == 201:
logger.debug('version created: %s' % version_response... | 882,718 |
retrieves all versions of an object, and stores them at self.versions
Args:
None
Returns:
None: appends instances | def get_versions(self):
# get all versions
versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)
# parse response
versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)
# loop through fedora.hasVersion
for version_uri in ve... | 882,719 |
Convenience method to return RDF data for resource,
optionally selecting serialization format.
Inspired by .dump from Samvera.
Args:
format (str): expecting serialization formats accepted by rdflib.serialization(format=) | def dump(self,format='ttl'):
return self.rdf.graph.serialize(format=format).decode('utf-8') | 882,720 |
method to revert resource to this version by issuing PATCH
Args:
None
Returns:
None: sends PATCH request, and refreshes parent resource | def revert_to(self):
# send patch
response = self.resource.repo.api.http_request('PATCH', self.uri)
# if response 204
if response.status_code == 204:
logger.debug('reverting to previous version of resource, %s' % self.uri)
# refresh current resource handle
self._current_resource.refresh()
els... | 882,722 |
method to refresh binary attributes and data
Args:
updated_self (Resource): resource this binary data attaches to
Returns:
None: updates attributes | def refresh(self, updated_self):
logger.debug('refreshing binary attributes')
self.mimetype = updated_self.binary.mimetype
self.data = updated_self.binary.data | 882,726 |
Sets Content-Type header based on headers and/or self.binary.mimetype values
Implicitly favors Content-Type header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers | def _prep_binary_mimetype(self):
# neither present
if not self.mimetype and 'Content-Type' not in self.resource.headers.keys():
raise Exception('to create/update NonRDFSource, mimetype or Content-Type header is required')
# mimetype, no Content-Type
elif self.mimetype and 'Content-Type' not in self.res... | 882,728 |
Sets delivery method of either payload or header
Favors Content-Location header if set
Args:
None
Returns:
None: sets attributes in self.binary and headers | def _prep_binary_content(self):
# nothing present
if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')
elif 'Content... | 882,729 |
method to return a particular byte range from NonRDF resource's binary data
https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Args:
byte_start(int): position of range start
byte_end(int): position of range end
Returns:
(requests.Response): streamable response | def range(self, byte_start, byte_end, stream=True):
response = self.resource.repo.api.http_request(
'GET',
self.resource.uri,
data=None,
headers={
'Content-Type':self.mimetype,
'Range':'bytes=%s-%s' % (byte_start, byte_end)
},
is_rdf=False,
stream=stream)
# expects 206
if respon... | 882,730 |
Issues fixity check, return parsed graph
Args:
None
Returns:
(dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check) | def fixity(self, response_format=None):
# if no response_format, use default
if not response_format:
response_format = self.repo.default_serialization
# issue GET request for fixity check
response = self.repo.api.http_request('GET', '%s/fcr:fixity' % self.uri)
# parse
fixity_graph = self.repo.api.... | 882,732 |
Perform a Yelp Phone API Search based on phone number given.
Args:
phone - Phone number to search by
cc - ISO 3166-1 alpha-2 country code. (Optional) | def by_phone(self, phone, cc=None):
header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)
return json.loads(content) | 882,782 |
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude | def by_geopoint(self, lat, long):
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content) | 882,783 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.