| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def get_model_kind(model):
"""Returns the "kind" of the given model.
NOTE: A model's kind is usually, but not always, the same as a model's class
name. Specifically, the kind is different when a model overwrites the
_get_kind() class method. Although Oppia never does this, the Apache Beam
framework uses "kind" to refer to models extensively, so we follow the same
convention and take special care to always return the correct value.
Args:
model: base_models.Model|cloud_datastore_types.Entity. The model to
inspect.
Returns:
bytes. The model's kind.
Raises:
TypeError. When the argument is not a model.
"""
if isinstance(model, base_models.BaseModel) or (
isinstance(model, type) and
issubclass(model, base_models.BaseModel)):
return model._get_kind() # pylint: disable=protected-access
elif isinstance(model, cloud_datastore_types.Entity):
return model.kind
else:
raise TypeError('%r is not a model type or instance' % model)
|
58465fd8d9a7893aeb046b5e05e713a912ff4a2f
| 3,637,039
|
def get_Zvalence_from_pseudo(pseudo):
"""
Extract the number of valence electrons from a pseudo
"""
with open(pseudo.get_file_abs_path(),'r') as f:
lines=f.readlines()
for line in lines:
if 'valence' in line:
try:
return int(float(line.split("z_valence=\""
)[-1].split("\"")[0].strip()))
except (ValueError, IndexError):
try:
return int(float(line.split("Z")[0].strip()))
except (ValueError, IndexError):
return None
|
aade59ef7d9d7d517c19f95d237993433f21ed7a
| 3,637,040
|
import numpy as np
import pandas as pd
def detect_data_shifts(time_series,
filtering=True, use_default_models=True,
method=None, cost=None, penalty=40):
"""
    Detect data shifts in the time series and return a boolean series marking
    the dates where these data shifts occur.
Parameters
----------
time_series : Pandas series with datetime index.
Daily time series of a PV data stream, which can include irradiance
and power data streams. This series represents the summed daily values
of the particular data stream.
filtering : Boolean, default True.
Whether or not to filter out outliers and stale data from the time
series. If True, then this data is filtered out before running the
data shift detection sequence. If False, this data is not filtered
out. Default set to True.
use_default_models: Boolean, default True
If True, then default change point detection search parameters are
used. For time series shorter than 2 years in length, the search
        function is `rpt.Window` with `model='rbf'`, `width=50` and
`penalty=30`. For time series 2 years or longer in length, the
search function is `rpt.BottomUp` with `model='rbf'`
and `penalty=40`.
method: ruptures search method instance or None, default None.
Ruptures search method instance. See
https://centre-borelli.github.io/ruptures-docs/user-guide/.
cost: str or None, default None
Cost function passed to the ruptures changepoint search instance.
See https://centre-borelli.github.io/ruptures-docs/user-guide/
penalty: int, default 40
Penalty value passed to the ruptures changepoint detection method.
Default set to 40.
Returns
-------
Pandas Series
Series of boolean values with a datetime index, where detected
changepoints are labeled as True, and all other values are labeled
as False.
.. warning:: If the passed time series is less than 2 years in length,
it will not be corrected for seasonality. Data shift detection will
be run on the min-max normalized time series with no seasonality
correction.
References
    ----------
.. [1] Perry K., and Muller, M. "Automated shift detection in sensor-based
PV power and irradiance time series", 2022 IEEE 48th Photovoltaic
Specialists Conference (PVSC). Submitted.
"""
    try:
        import ruptures as rpt
    except ImportError:
        raise ImportError("data_shifts() requires ruptures.")
# Run data checks on cleaned data to make sure that the data can be run
# successfully through the routine
_run_data_checks(time_series)
# Run the filtering sequence, if marked as True
    if filtering:
        time_series_filtered = _erroneous_filter(time_series)
    else:
        time_series_filtered = time_series.copy()
# Drop any duplicated data from the time series
time_series_filtered = time_series_filtered.drop_duplicates()
# Check if the time series is more than 2 years long. If so, remove
# seasonality. If not, run analysis on the normalized time series
if (time_series_filtered.index.max() -
time_series_filtered.index.min()).days <= 730:
time_series_processed = _preprocess_data(time_series_filtered,
remove_seasonality=False)
seasonality_rmv = False
else:
# Perform pre-processing on the time series, to get the
# seasonality-removed time series.
time_series_processed = _preprocess_data(time_series_filtered,
remove_seasonality=True)
seasonality_rmv = True
points = np.array(time_series_processed.dropna())
# If seasonality has been removed and default model is used, run
# BottomUp method
if (seasonality_rmv) & (use_default_models):
algo = rpt.BottomUp(model='rbf').fit(points)
result = algo.predict(pen=40)
# If there is no seasonality but default model is used, run
# Window-based method
elif (not seasonality_rmv) & (use_default_models):
algo = rpt.Window(model='rbf',
width=50).fit(points)
result = algo.predict(pen=30)
# Otherwise run changepoint detection with the passed parameters
else:
algo = method(model=cost).fit(points)
result = algo.predict(pen=penalty)
# Remove the last index of the time series, if present
if len(points) in result:
result.remove(len(points))
# Return a list of dates where changepoints are detected
time_series_processed.index.name = "datetime"
mask = pd.Series(False, index=time_series_processed.index)
mask.iloc[result] = True
# Re-index the mask to include any timestamps that were
# filtered out as outliers
mask = mask.reindex(time_series.index, fill_value=False)
return mask
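# A minimal usage sketch (illustrative only; assumes the private helpers above
# are available and that numpy/pandas are imported as np/pd):
#
#     idx = pd.date_range("2019-01-01", "2021-12-31", freq="D")
#     daily_power = pd.Series(np.random.rand(len(idx)), index=idx)
#     daily_power["2020-06-01":] *= 0.8           # simulate a sensor re-scaling
#     shift_mask = detect_data_shifts(daily_power)
#     shift_dates = shift_mask[shift_mask].index  # dates flagged as shifts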
|
d924d36a53f965b76943f1a466d3b88649cbe0ef
| 3,637,041
|
import pyreadr
def read_rds(filepath):
"""Read an RDS-format matrix into a Pandas dataframe.
Location can be data, scratch, or results.
Index is populated from first column"""
raw_df = pyreadr.read_r(filepath)[None]
if raw_df.isnull().values.any():
raise ValueError("NaN's were found in the data matrix.")
return raw_df.set_index(raw_df.columns[0], drop=True)
|
c4b171638883fc2c3b32397e79a413a9441567f0
| 3,637,042
|
def history():
"""Show history of transactions."""
# Read Transactions database for desired elements
transactions = db.execute("SELECT symbol, share, price, method, timestamp FROM Transactions WHERE id = :uid", uid = session["user_id"])
# Convert prices to 2 decimal places
for transaction in transactions:
transaction["price"] = usd(transaction["price"])
return render_template("history.html", transactions = transactions)
|
5eac4a49c473467db851fe2ea6e58b29cc1a9bfe
| 3,637,043
|
from typing import get_args
def generate_args(job_name, common, cloud_provider, image, k8s_version,
test_suite, job):
"""Returns a list of args fetched from the given fields."""
args = []
args.extend(get_args(job_name, common))
args.extend(get_args(job_name, cloud_provider))
args.extend(get_args(job_name, image))
args.extend(get_args(job_name, k8s_version))
args.extend(get_args(job_name, test_suite))
args.extend(get_args(job_name, job))
return args
|
7f53dcf66269b0d14f9fad1c1079cf1716529f09
| 3,637,045
|
import math
def isPrime(n):
"""
    Check whether a positive integer is prime.
    Uses trial division.
"""
if n <= 1:
return False
if n == 2:
return True
i = 2
thres = math.ceil(math.sqrt(n))
while i <= thres:
if n % i == 0:
return False
i += 1
return True
|
458775fbd324dc976c91a035898b3122e6bc1109
| 3,637,046
|
from typing import Tuple
import numpy as np
import torch
import torch.nn.functional as F
def reconstruction_loss(loss_type: str,
in_dim: Tuple[int],
x: torch.Tensor,
x_reconstr: torch.Tensor,
logits: bool = True,
) -> torch.Tensor:
"""
Computes reconstruction loss (mse or cross-entropy)
without mean reduction (used in VAE objectives)
"""
batch_dim = x.size(0)
if loss_type == "mse":
reconstr_loss = 0.5 * torch.sum(
(x_reconstr.reshape(batch_dim, -1) - x.reshape(batch_dim, -1))**2, 1)
elif loss_type == "ce":
rs = (np.product(in_dim[:2]),)
if len(in_dim) == 3:
rs = rs + (in_dim[-1],)
xe = (F.binary_cross_entropy_with_logits if
logits else F.binary_cross_entropy)
reconstr_loss = xe(x_reconstr.reshape(-1, *rs), x.reshape(-1, *rs),
reduction='none').sum(-1)
else:
raise NotImplementedError("Reconstruction loss must be 'mse' or 'ce'")
return reconstr_loss
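# Illustrative usage sketch (arbitrary shapes, not from the original source):
# per-example MSE reconstruction term for a batch of 8 flattened 28x28 images.
#
#     x = torch.rand(8, 784)
#     x_hat = torch.rand(8, 784)
#     per_example_loss = reconstruction_loss("mse", (28, 28), x, x_hat)  # shape (8,)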
|
30dbd75eddbc7f2d0994f867e2f9492b24f707b1
| 3,637,047
|
def NOR(*variables):
"""NOR.
    Return the boolean expression for the NOR of the variables. Equivalent to
``NOT(OR(*variables))``.
Parameters
----------
*variables : arguments.
``variables`` can be of arbitrary length. Each variable can be a
hashable object, which is the label of the boolean variable, or a dict
(or subclass of dict) representing a boolean expression.
Return
------
P : ``qubovert.PUBO`` object or same type as ``type(variables[0])``.
The boolean expression for the logic operation.
If ``variables[0]`` is a ``qubovert.QUBO``, ``qubovert.PCBO``,
``qubovert.utils.QUBOMatrix``, or ``qubovert.utils.PUBOMatrix`` object,
    then ``type(P) == type(variables[0])``. Otherwise,
    ``type(P) == qubovert.PUBO``.
Example
-------
>>> from qubovert.sat import NOR
>>> P = NOR(0, 1)
>>> P
{(0,): -1, (0, 1): 1, (1,): -1, (): 1}
>>> P.value({0: 0, 1: 0})
1
>>> P.value({0: 0, 1: 1})
0
>>> P.value({0: 1, 1: 0})
0
>>> P.value({0: 1, 1: 1})
0
>>> type(P)
qubovert._pubo.PUBO
>>> P = NOR({(0, 1): 1}, 'x') # nor of 0, 1, and 'x'.
>>> P
{(0, 1): -1, (0, 1, 'x'): 1, ('x',): -1, (): 1}
>>> type(P)
qubovert._pubo.PUBO
>>> from qubovert import boolean_var
>>> x, y = boolean_var('x'), boolean_var('y')
>>> P = NOR(x, y)
>>> type(P)
qubovert.PCBO
"""
return NOT(OR(*variables))
|
e3b9d5eb3c167ac04de66609828583bb5eeb7004
| 3,637,048
|
import io
import time
def timing_run(args, shell: bool = False, stdin=None, stdout=None, stderr=None,
environ=None, cwd=None, resources=None, identification=None, shuffle=False) -> RunResult:
"""
    Create a timing process with streams
:param args: arguments for execution
:param shell: use shell to execute args
:param stdin: stdin stream (none means nothing)
:param stdout: stdout stream (none means nothing)
:param stderr: stderr stream (none means nothing)
:param environ: environment variables
:param cwd: new work dir
:param resources: resource limit
:param identification: user and group for execution
:param shuffle: Shuffle the inputs with similar timestamp.
:return: run result of this time
"""
stdin_need_close = not stdin
stdin = stdin or io.BytesIO()
stdout_need_close = not stdout
stdout = stdout or io.BytesIO()
stderr_need_close = not stderr
stderr = stderr or io.BytesIO()
with eclosing(stdin, stdin_need_close) as stdin, \
eclosing(stdout, stdout_need_close) as stdout, \
eclosing(stderr, stderr_need_close) as stderr:
_stdin = TimingStdin.loads(_try_read_to_bytes(stdin))
if shuffle:
_stdin = _stdin.to_shuffled()
with interactive_process(
args=args, shell=shell,
environ=environ, cwd=cwd,
resources=resources, identification=identification,
) as ip:
for _time, _line in _stdin.lines:
_target_time = ip.start_time + _time
while time.time() < _target_time and not ip.completed:
time.sleep(max(min(0.2, _target_time - time.time()), 0.0))
try:
ip.print_stdin(_line)
except BrokenPipeError:
break
ip.close_stdin()
_stdout, _stderr = [], []
for _time, _tag, _line in ip.output_yield:
if _tag == 'stdout':
_stdout.append((_time, _line))
elif _tag == 'stderr':
_stderr.append((_time, _line))
else:
                    raise ValueError('Unknown output type - {type}.'.format(type=repr(_tag)))  # pragma: no cover
ip.join()
_try_write(stdout, TimingStdout.loads(_stdout).dumps())
_try_write(stderr, TimingStderr.loads(_stderr).dumps())
return ip.result
|
936d9611769dc5e04381131cd7bf18be73580bb3
| 3,637,049
|
def alterMethods(cls):
"""
Alter Monte methods on behalf of AutoHelp.
Return the signatures of the altered methods.
NOT_RPYTHON
"""
atoms = []
imports = set()
def nextName(nameIndex=[0]):
name = "_%d" % nameIndex[0]
nameIndex[0] += 1
return name
execNames = {"Refused": Refused}
dispatchClauses = []
d = {}
# Walk the MRO and harvest Monte methods. The repacker has already placed
# them in the correct location.
for c in reversed(cls.__mro__):
if hasattr(c, "_monteMethods_"):
d.update(c._monteMethods_)
for attr, (f, verb, args, kwargs, rv) in d.iteritems():
# The verb is now Unicode.
verb = verb.decode("utf-8")
assignments = []
if isStarArgs(args):
atomTest = "atom.verb == %r" % verb
call = "self.%s(args)" % attr
else:
atomName = nextName()
execNames[atomName] = atom = getAtom(verb, len(args))
atoms.append(atom)
atomTest = "atom is %s" % atomName
argNames = []
for i, arg in enumerate(args):
argName = nextName()
argNames.append(argName)
assignments.append("%s = args[%d]" % (argName, i))
if arg != "Any":
unwrapperModule = wrappers[arg]
pred = "is" + arg
imports.add("from %s import %s" % (unwrapperModule, pred))
atomTest += " and %s(args[%d])" % (pred, i)
unwrapper = "unwrap" + arg
imports.add("from %s import %s" % (unwrapperModule,
unwrapper))
assignments.append("%s = %s(%s)" % (argName, unwrapper,
argName))
for k, v in kwargs.iteritems():
kwargName = nextName()
argNames.append("%s=%s" % (k, kwargName))
assignments.append("%s = namedArgs.extractStringKey(%r, None)"
% (kwargName, k.decode("utf-8")))
if v != "Any":
unwrapperModule = wrappers[v]
unwrapper = "unwrap" + v
imports.add("from %s import %s" % (unwrapperModule,
unwrapper))
assignments.append("%s = %s(%s) if %s is None else None" %
(kwargName, unwrapper, kwargName, kwargName))
call = "self.%s(%s)" % (attr, ",".join(argNames))
retvals = []
if rv == "Any":
# No wrapping.
retvals.append("return rv")
elif rv == "Void":
# Enforced correctness. Disobedience will not be tolerated.
retvals.append("assert rv is None, 'habanero'")
retvals.append("from typhon.objects.constants import NullObject")
retvals.append("return NullObject")
else:
wrapperModule = wrappers[rv]
wrapper = "wrap" + rv
imports.add("from %s import %s" % (wrapperModule, wrapper))
retvals.append("return %s(rv)" % wrapper)
dispatchClauses.append("""
if %s:
%s
rv = %s
%s
""" % (atomTest, ";".join(assignments), call, ";".join(retvals)))
setattr(cls, attr, f)
# Temporary. Soon, all classes shall receive AutoHelp, and no class will
# have a handwritten recv().
if dispatchClauses:
exec py.code.Source("""
def recvNamed(self, atom, args, namedArgs):
%s
%s
rv = self.mirandaMethods(atom, args, namedArgs)
if rv is None:
raise Refused(self, atom, args)
else:
return rv
""" % (";".join(imports), "\n".join(dispatchClauses))).compile() in execNames
cls.recvNamed = execNames["recvNamed"]
return atoms
|
9c1dcbda1a96196bdde3f31563d53f8c2be6eeb1
| 3,637,050
|
from typing import List
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from wordcloud import WordCloud
def make_multiclouds(docs: List[Union[dict, object, str, tuple]],
opts: dict = None,
ncols: int = 3,
title: str = None,
labels: List[str] = None,
show: bool = True,
figure_opts: dict = None,
round: int = None
):
"""Make multiclouds.
Accepts data from a string, list of lists or tuples, a dict with
terms as keys and counts/frequencies as values, or a dataframe.
The best input is a dtm produced by `get_dtm_table()`.
Args:
docs (List[Union[dict, object, str, tuple]]): The data. Accepts a list of text strings, a list of tuples,
or dicts with the terms as keys and the counts/frequencies as values, or a dataframe with "term" and
"count" or "frequency" columns.
opts (dict): The WordCloud() options.
            For testing, try {"background_color": "white", "max_words": 2000, "contour_width": 3, "contour_color": "steelblue"}
ncols (int): The number of columns in the grid.
title (str): The title of the grid.
labels (List[str]): The document labels for each subplot.
show (bool): Whether to show the plotted word cloud or return it as a WordCloud object.
figure_opts (dict): A dict of matplotlib figure options.
round (int): An integer (generally between 100-300) to apply a mask that rounds the word cloud.
Returns:
object: A WordCloud object if show is set to False.
Notes:
- For a full list of options, see https://amueller.github.io/word_cloud/generated/wordcloud.WordCloud.html#wordcloud-wordcloud.
- If `show=False` the function expects to be called with something like `wordcloud = make_wordcloud(data, show=False)`.
This returns WordCloud object which can be manipulated by any of its methods, such as `to_file()`. See the
WordCloud documentation for a list of methods.
"""
# Process the docs data into a list
if isinstance(docs, pd.core.frame.DataFrame):
# Assumes a df with columns: Terms, Doc_Label, DocLabel,...
# Transpose the df
docs = docs.T
# Grab the first row for the header
new_header = docs.iloc[0]
# Drop the first row
docs = docs[1:]
# Set the header row as the df header
docs.columns = new_header
# Return a dict
docs = docs.to_dict(orient="records")
# Ensure that anything that is not a list of strings is converted
# to the appropriate format.
elif isinstance(docs, list):
if all(isinstance(s, str) for s in docs):
pass
else:
            docs = [{x[0]: x[1] for x in data} for data in docs]
# List for multiple word clouds if they are to be returned.
multiclouds = []
    # Ensure the dict arguments are usable even when not supplied.
    if opts is None:
        opts = {}
    if figure_opts is None:
        figure_opts = {}
    # Create a rounded mask.
    if round:
        x, y = np.ogrid[:300, :300]
        mask = (x - 150) ** 2 + (y - 150) ** 2 > round ** 2
        mask = 255 * mask.astype(int)
        opts["mask"] = mask
    # Constrain the layout
    figure_opts["constrained_layout"] = True
# Create the figure.
fig = plt.figure(**figure_opts)
# Add the title
if title:
fig.suptitle(title)
# Calculate the number of rows and columns.
nrows = int(np.ceil(len(docs) / ncols))
spec = fig.add_gridspec(nrows, ncols)
# Divide the data into rows.
rows = list(get_rows(docs, ncols))
# Set an index for labels
i = 0
# Loop through the rows.
for row, doc in enumerate(rows):
# Loop through the documents in the row.
for col, data in enumerate(doc):
# Create a subplot.
ax = fig.add_subplot(spec[row, col])
# Generate the subplot's word cloud.
if isinstance(data, str):
wordcloud = WordCloud(**opts).generate_from_text(data)
else:
wordcloud = WordCloud(**opts).generate_from_frequencies(data)
# If `show=True`, show the word cloud.
if show:
ax.imshow(wordcloud)
ax.axis("off")
# Set the image title from the label
if labels:
ax.set_title(labels[i])
i += 1
# Otherwise, add the word cloud to the multiclouds list.
else:
multiclouds.append(wordcloud)
# If `show=False`, return the multiclouds list.
if not show:
return multiclouds
|
9c1f6363d1cc6cd0e20591c1ab54b1761414d29c
| 3,637,051
|
def action_prop(param, val=1):
"""A param that performs an action"""
def fdo(self):
self.setter(param, val)
return fdo
|
6a4f6e7e178e62755113d6b93a59534675dfa2dd
| 3,637,052
|
def find_or_create(find, create):
"""Given a find and a create function, create a resource if it doesn't exist"""
result = find()
return result if result else create()
|
ffe608bf2da1b83d662b93266f4309976424300f
| 3,637,053
|
import math
def Gsigma(sigma):
"""Pickle a gaussian function G(x) for given sigma"""
def G(x):
return (math.e ** (-(x**2)/(2*sigma**2)))/(2 * math.pi* sigma**2)**0.5
return G
|
77eac3ca8b6ced0063074527b83c50e8681f980d
| 3,637,054
|
from indico.modules.events.contributions.ical import generate_contribution_component
def session_to_ical(session, detailed=False):
"""Serialize a session into an iCal.
:param session: The session to serialize
:param detailed: If True, iCal will include the session's contributions
"""
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add('prodid', '-//CERN//INDICO//EN')
related_event_uid = f'indico-event-{session.event.id}@{url_parse(config.BASE_URL).host}'
if not detailed:
component = generate_session_component(session, related_event_uid)
calendar.add_component(component)
else:
contributions = (Contribution.query.with_parent(session)
.filter(Contribution.is_scheduled)
.all())
components = [generate_contribution_component(contribution, related_event_uid)
for contribution in contributions]
for component in components:
calendar.add_component(component)
return calendar.to_ical()
|
9f0cb5a5ce6f31c6690b71948fbe6e8eeb2f7080
| 3,637,055
|
def _normalize_hosts(hosts):
"""
Helper function to transform hosts argument to
:class:`~elasticsearch.Elasticsearch` to a list of dicts.
"""
# if hosts are empty, just defer to defaults down the line
if hosts is None:
return [{}]
# passed in just one string
if isinstance(hosts, string_types):
hosts = [hosts]
out = []
# normalize hosts to dicts
for host in hosts:
if isinstance(host, string_types):
if "://" not in host:
host = "//%s" % host
parsed_url = urlparse(host)
h = {"host": parsed_url.hostname}
if parsed_url.port:
h["port"] = parsed_url.port
if parsed_url.scheme == "https":
h["port"] = parsed_url.port or 443
h["use_ssl"] = True
if parsed_url.username or parsed_url.password:
h["http_auth"] = "%s:%s" % (
unquote(parsed_url.username),
unquote(parsed_url.password),
)
if parsed_url.path and parsed_url.path != "/":
h["url_prefix"] = parsed_url.path
out.append(h)
else:
out.append(host)
return out
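# Worked examples of the normalization performed above:
#
#     _normalize_hosts("localhost")
#     # -> [{"host": "localhost"}]
#     _normalize_hosts(["https://user:secret@example.com:9243/prefix"])
#     # -> [{"host": "example.com", "port": 9243, "use_ssl": True,
#     #      "http_auth": "user:secret", "url_prefix": "/prefix"}]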
|
ef3a6cfadd6a297f31afdfec4b8a77a0f88cd08f
| 3,637,056
|
def data(self: Client) -> DataProxy:
"""Delegates to a
:py:class:`mcipc.rcon.je.commands.data.DataProxy`
"""
return DataProxy(self, 'data')
|
072806ad6f27e8bd645bd04cf34619946a83bf06
| 3,637,057
|
from keras import backend as K
def proximal_policy_optimization_loss(advantage, old_prediction, loss_clipping=0.2, entropy_loss=5e-3):
"""
https://github.com/LuEE-C/PPO-Keras/blob/master/Main.py
# Only implemented clipping for the surrogate loss, paper said it was best
:param advantage:
:param old_prediction:
:param loss_clipping:
:param entropy_loss:
:return:
"""
def loss(y_true, y_pred):
prob = K.sum(y_true * y_pred, axis=-1) # Multiply with the one hot encoded taken action
old_prob = K.sum(y_true * old_prediction, axis=-1)
r = prob / (old_prob + 1e-10)
return -K.mean(K.minimum(r * advantage, K.clip(
r, min_value=1 - loss_clipping, max_value=1 + loss_clipping) * advantage) + entropy_loss * -(
prob * K.log(prob + 1e-10)))
return loss
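# For reference, the returned closure implements the clipped surrogate objective
#     L = -E[ min(r * A, clip(r, 1 - eps, 1 + eps) * A) + c * H ]
# where r = pi(a|s) / pi_old(a|s) is the probability ratio, A is `advantage`,
# eps is `loss_clipping`, c is `entropy_loss`, and H is the per-sample entropy
# estimate -prob * log(prob).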
|
ca7e1a602a6da6236fbd85facb373fa623fc62d5
| 3,637,058
|
import re
def tokenize_string(string):
"""Split a string up into analyzable characters.
Returns a list of individual characters that can
then be matched with the regex patterns.
Note that all accent characters can be found with
the range: \u0300-\u036F. Thus, strings are split
by [any_character][any_accent]*.
"""
norm_string = normalize_string(string)
return re.findall('.[\u0300-\u036F]*', norm_string)
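# Illustrative example (assuming `normalize_string` applies NFD decomposition,
# so accents become separate combining characters):
#
#     tokenize_string("café")
#     # -> ['c', 'a', 'f', 'e\u0301']  (the accent stays attached to its base letter)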
|
f3757e190f99d3430dee17ca51ea6a6d7fa70ff9
| 3,637,059
|
def compute_final_metrics(source_waveforms, separated_waveforms, mixture_waveform):
"""Permutation-invariant SI-SNR, powers, and under/equal/over-separation."""
perm_inv_loss = wrap(lambda tar, est: -signal_to_noise_ratio_gain_invariant(est, tar))
_, separated_waveforms = perm_inv_loss(source_waveforms,separated_waveforms)
# Compute separated and source powers.
power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
# Compute weights for active (separated, source) pairs where source is nonzero
# and separated power is above threshold of quietest source power - 20 dB.
weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
weights_active_seps = _weights_for_active_seps(
tf.boolean_mask(power_sources, weights_active_refs), power_separated)
weights_active_pairs = tf.logical_and(weights_active_refs,
weights_active_seps)
# Compute SI-SNR.
sisnr_separated = signal_to_noise_ratio_gain_invariant(separated_waveforms, source_waveforms)
num_active_refs = tf.math.reduce_sum(tf.cast(weights_active_refs, tf.int32))
num_active_seps = tf.math.reduce_sum(tf.cast(weights_active_seps, tf.int32))
num_active_pairs = tf.math.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
sisnr_mixture = signal_to_noise_ratio_gain_invariant(
tf.tile(mixture_waveform, (1,source_waveforms.shape[1], 1)),source_waveforms)
# Compute under/equal/over separation.
under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
tf.float32)
equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
tf.float32)
over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
tf.float32)
return {'sisnr_separated': sisnr_separated,
'sisnr_mixture': sisnr_mixture,
'sisnr_improvement': sisnr_separated - sisnr_mixture,
'power_separated': power_separated,
'power_sources': power_sources,
'under_separation': under_separation,
'equal_separation': equal_separation,
'over_separation': over_separation,
'weights_active_refs': weights_active_refs,
'weights_active_seps': weights_active_seps,
'weights_active_pairs': weights_active_pairs,
'num_active_refs': num_active_refs,
'num_active_seps': num_active_seps,
'num_active_pairs': num_active_pairs}
|
3e7a6a52b8a26c4a4fa7fec9de17559617e4d467
| 3,637,060
|
import numpy
import pandas
def gen_sdc_pandas_series_rolling_impl(pop, put, get_result=result_or_nan,
init_result=numpy.nan):
"""Generate series rolling methods implementations based on pop/put funcs"""
def impl(self):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
chunks = parallel_chunks(length)
for i in prange(len(chunks)):
chunk = chunks[i]
nfinite = 0
result = init_result
if win == 0:
for idx in range(chunk.start, chunk.stop):
output_arr[idx] = get_result(nfinite, minp, result)
continue
prelude_start = max(0, chunk.start - win + 1)
prelude_stop = chunk.start
interlude_start = prelude_stop
interlude_stop = min(prelude_start + win, chunk.stop)
for idx in range(prelude_start, prelude_stop):
value = input_arr[idx]
nfinite, result = put(value, nfinite, result)
for idx in range(interlude_start, interlude_stop):
value = input_arr[idx]
nfinite, result = put(value, nfinite, result)
output_arr[idx] = get_result(nfinite, minp, result)
for idx in range(interlude_stop, chunk.stop):
put_value = input_arr[idx]
pop_value = input_arr[idx - win]
nfinite, result = put(put_value, nfinite, result)
nfinite, result = pop(pop_value, nfinite, result)
output_arr[idx] = get_result(nfinite, minp, result)
return pandas.Series(output_arr, input_series._index,
name=input_series._name)
return impl
|
8fb25c10e862d21af75b244053ac96075c1efa19
| 3,637,061
|
from random import SystemRandom
import matplotlib as mpl
import seaborn as sns
def gen_random_colors(num_groups, colors=None):
"""
Generates random colors.
Parameters
----------
num_groups : int
The number of groups for which colors should be generated.
colors : list : optional (contains strs)
Hex based colors that should be appended if not enough have been provided.
Returns
-------
colors or colors + new_colors : list (contains strs)
Randomly generated colors for figures and plotting.
"""
if colors is None:
colors = []
if len(colors) < num_groups:
while len(colors) < num_groups:
cryptogen = SystemRandom()
random_rgba = [cryptogen.random() for i in range(4)]
colors.append(random_rgba)
sns.set_palette(colors)
if isinstance(colors[0][0], float):
# Convert over for non-sns use.
colors = [mpl.colors.to_hex([c[0], c[1], c[2]]).upper() for c in colors]
return colors
|
462835c6bacd5024ac20bab960d2c2e9d95e4dab
| 3,637,062
|
from collections import defaultdict
def build_graph(sorted_sequence):
"""
Each node points to a list of the nodes that are reacheable from it.
"""
elements = set(sorted_sequence)
graph = defaultdict(lambda : [])
for element in sorted_sequence:
for i in [1, 2, 3]:
if element + i in elements:
graph[element].append(element + i)
return graph
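# Worked example: with the sorted sequence [1, 2, 3, 6], each value points to
# the values reachable by adding 1, 2 or 3:
#
#     build_graph([1, 2, 3, 6])
#     # -> {1: [2, 3], 2: [3], 3: [6]}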
|
a14d2278909df459856e23c7073d551b354f258d
| 3,637,065
|
from numpy import std
def _findCentralBond(mol, distmat):
""" Helper function to identify the atoms of the most central bond.
Arguments:
- mol: the molecule of interest
- distmat: distance matrix of the molecule
Return: atom indices of the two most central atoms (in order)
"""
# get the most central atom = atom with the least STD of shortest distances
stds = []
for i in range(mol.GetNumAtoms()):
# only consider non-terminal atoms
if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2:
continue
tmp = [d for d in distmat[i]]
tmp.pop(i)
stds.append((std(tmp), i))
stds.sort()
aid1 = stds[0][1]
# find the second most central bond that is bonded to aid1
i = 1
while 1:
if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
i += 1
else:
aid2 = stds[i][1]
break
return aid1, aid2 # most central atom comes first
|
bbaca8c48bf8c5e1a5d2ffa317448f05235c834e
| 3,637,066
|
def transform(data, transformer):
"""This hook defines how DataRobot will use the trained object from fit() to transform new data.
DataRobot runs this hook when the task is used for scoring inside a blueprint.
As an output, this hook is expected to return the transformed data.
The input parameters are passed by DataRobot based on dataset and blueprint configuration.
Parameters
-------
data: pd.DataFrame
Data that DataRobot passes for transformation.
transformer: Any
Trained object, extracted by DataRobot from the artifact created inside fit().
In this example, it's a function
Returns
-------
pd.DataFrame
Returns a dataframe with transformed data.
"""
return data.apply(transformer)
|
b52577c0b2a3f3edb1297dcf9c567f9845f04bd5
| 3,637,067
|
import asyncio
import base64
async def sign_params(params, certificate_file, private_key_file):
"""
Signs params adding client_secret key, containing signature based on `scope`, `timestamp`, `client_id` and `state`
keys values.
:param dict params: requests parameters
:param str certificate_file: path to certificate file
:param str private_key_file: path to private key file
:return:signed request parameters
:rtype: dict
"""
plaintext = ''.join([
params.get(key, '') for key in ['scope', 'timestamp', 'client_id', 'state']
])
cmd = 'openssl smime -sign -md md_gost12_256 -signer {cert} -inkey {key} -outform DER'.format(
cert=certificate_file,
key=private_key_file
)
proc = await asyncio.create_subprocess_shell(
cmd,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
stdout, stderr = await proc.communicate(input=plaintext.encode())
if proc.returncode != 0:
raise OpenSSLError
    client_secret = base64.urlsafe_b64encode(stdout).decode('utf-8')
return {**params, 'client_secret': client_secret}
|
be9980e5fb0b60da8a21c77b4ac7c9795560b557
| 3,637,068
|
import numpy as np
def sum_of_fourth_powers(matrix):
"""
:param matrix: (numpy.ndarray) A numpy array.
:return: The fourth power of the four-norm of the matrix. In other words,
the sum of the fourth power of all of its entries.
"""
squared_entries = matrix * matrix
return np.sum(squared_entries * squared_entries)
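# Worked example: for [[1, 2], [3, 0]] the entries raised to the fourth power
# sum to 1 + 16 + 81 + 0 = 98.
#
#     sum_of_fourth_powers(np.array([[1, 2], [3, 0]]))  # -> 98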
|
51039a259594205a88b223b1e3d8387e05581c0f
| 3,637,069
|
from typing import Dict
def key_in_direction(start: Key, direction: str, keypad: Keypad) -> Key:
"""
Return the value of the key in the given direction.
"""
row = next(r for r in keypad if start in r)
x_pos = row.index(start)
col = [c[x_pos] for c in keypad]
y_pos = col.index(start)
directions: Dict[str, Key] = {
"U": col[max(0, y_pos - 1)],
"D": col[min(y_pos + 1, len(col) - 1)],
"L": row[max(0, x_pos - 1)],
"R": row[min(x_pos + 1, len(row) - 1)],
}
return directions[direction] or start
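# Illustrative usage sketch (the 3x3 keypad layout below is an assumption):
#
#     KEYPAD = (("1", "2", "3"),
#               ("4", "5", "6"),
#               ("7", "8", "9"))
#     key_in_direction("5", "U", KEYPAD)  # -> "2"
#     key_in_direction("1", "L", KEYPAD)  # -> "1" (clamped at the edge)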
|
c0a8909517ec1de29325d0acc18e0c8968bda3b5
| 3,637,070
|
from functools import wraps
import numpy as np
def vectorize_args(nums):
"""
Decorator for vectorization of arguments of a function.
The positions of the arguments are given in the tuple nums.
See numpy.vectorize.
"""
def wrap(func):
@wraps(func)
def wrapped(*args, ** kwargs):
args = list(args)
for i, arg in enumerate(args):
if i in nums and type(arg) == list:
args[i] = np.array(arg)
for i, arg in enumerate(args):
if i in nums and type(arg) == np.ndarray:
shape = np.shape(arg)
ind = np.transpose(np.ones(shape).nonzero())
break
if i == len(args) - 1:
# no need for vectorization as all relevant
# arguments are scalars
return func(*args, ** kwargs)
res = np.array([func(
* [arg[tuple(j)] if type(arg) == np.ndarray and i in nums else arg for i, arg in enumerate(args)], ** kwargs)
for j in ind])
            if np.shape(res) != shape:
# func returns more than 1 result, this means the array has to
# be ordered differently
res = res.transpose()
if len(shape) > 1:
                # more than 1D arrays, the shape of the list has to be rearranged
res = res.reshape((res.shape[0],) + shape)
return res
return wrapped
return wrap
|
cd9b13bdcd26f1c74a2eaa18396ebfb11ed02446
| 3,637,071
|
def parse_lambda_config(x):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
if isinstance(x, float):
return x, None
split = x.split(',')
if len(split) == 1:
return float(x), None
else:
split = [s.split(':') for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
return float(split[0][1]), [(int(k), float(v)) for k, v in split]
|
d85980c2efd46284de8e939f42ef4f5dd49dfd73
| 3,637,072
|
def format_cols(colname, direction='in'):
"""Formats columns beween human-readable and pandorable
Keyword arguments:
real -- the real part (default 0.0)
imag -- the imaginary part (default 0.0)
"""
if imag == 0.0 and real == 0.0:
return complex_zero
...
if direction == 'in':
return (colname
.lower()
.replace(' ', '_')
.replace('(', '')
.replace(')', '')
)
elif direction == 'out':
return (colname.replace('_', ' ')
.title()
)
raise ValueError('Direction must be "in" or "out"')
|
a61dbedb2e08c4de03c719c4daff10de41e19304
| 3,637,073
|
def convert_decimal_to_binary(number):
"""
Parameters
----------
number: int
Returns
-------
out: str
>>> convert_decimal_to_binary(10)
'1010'
"""
return bin(number)[2:]
|
01a9be2e70c87091adc1d85759075668da9270f2
| 3,637,074
|
from typing import Optional
import pathlib
import tarfile
def fetch_tgz(
dataname: str,
urlname: str,
subfolder: Optional[str] = None,
data_home: Optional[str] = None,
) -> pathlib.Path:
"""Fetch tgz dataset.
Fetch a tgz file from a given url, unzips and stores it in a given
directory.
Parameters
----------
dataname: string
Dataset name.
urlname: string
Dataset url.
subfolder: string, default=None
The subfolder where to put the data, if any.
data_home: string, default=None
Dataset directory. If None, use the default of scikit-learn.
Returns
-------
data_home: Path
Directory.
"""
return fetch_compressed(
dataname=dataname,
urlname=urlname,
compression_open=tarfile.open,
subfolder=subfolder,
data_home=data_home,
open_format='r:gz',
)
|
00c4f91a657e37767a43b3af0766b5b407144617
| 3,637,075
|
def choisir_action():
"""Choisir action de cryptage ou de décryptage
Entree : -
Sortie: True pour cryptage, False pour décryptage"""
action_est_crypter = True
action = input("Quelle est l'action, crypter ou décrypter ? \n<Entrée> pour crypter, autre touche pour decrypter, ou <Crtl> + Z ou X pour arréter.\n")
if action : action_est_crypter = False
return action_est_crypter
|
c0bceb748afb1fc32b865136c4a477f06a6412b2
| 3,637,076
|
def σ(u, p, μ):
"""Stress tensor of isotropic Newtonian fluid.
σ = 2 μ (symm ∇)(u) - p I
This method returns a UFL expression the whole stress tensor. If you want
to plot, extract and interpolate or project what you need. For example,
to plot the von Mises stress::
from dolfin import tr, Identity, sqrt, inner
from fenics import project, plot
# scalar function space
W = V.sub(0).collapse() # use the space of the first comp. of `V`
# W = FunctionSpace(mesh, 'P', 2) # or create your own space
def dev(T):
'''Deviatoric (traceless) part of rank-2 tensor `T`.
This assumes, for 2D, that `T` is actually 3D, but
the third row and column of `T` are zero.
'''
return T - (1 / 3) * tr(T) * Identity(T.geometric_dimension())
# `solver._μ` is the UFL `Constant` object
σ = σ(solver.u_, solver.p_, solver._μ)
s = dev(σ)
vonMises = sqrt(3 / 2 * inner(s, s))
plot(project(vonMises, W))
"""
return 2 * μ * ε(u) - p * Identity(p.geometric_dimension())
|
03f61ea7c128503ee930714107a8f7a007641cee
| 3,637,077
|
async def cycle(command: Command, switches: PowerSwitch, name: str, portnum: int):
"""cycle power to an Outlet"""
command.info(text=f"Cycle port {name}...")
for switch in switches:
current_status = await switch.statusAsJson(name, portnum)
if current_status:
break
# print(current_status)
# status |= await switch.statusAsJson(name, portnum) works only with python 3.9
# current_status = await switch.statusAsJson(name, portnum)
try:
# off
if current_status[name]["STATE"] == 1:
current_status = await switch_control(
"cycle", switches, False, name, portnum
)
elif current_status[name]["STATE"] == 0:
return command.fail(text=f"The Outlet {name} is OFF")
else:
return command.fail(text=f"The Outlet {name} returns wrong value")
except PowerException as ex:
return command.fail(error=str(ex))
return command.finish(text="done")
|
7b5a17eaeecb4d8f1072f014de716bb1bb95dc97
| 3,637,078
|
def zk_delete_working_node(zk_client, server):
"""删除服务节点"""
node_path, root_path = get_path_to_current_working_node(server)
zk_client.ensure_path(root_path)
result = zk_client.delete(node_path, ephemeral=True)
return result
|
45effe39d8cd5eb22742c6eed19984ae40b0e192
| 3,637,079
|
import torch
def construct_filters_from_2d(matrix, filter_starts, decomp_level):
"""
construct the filters in the proper shape for the DWT inverse forward step
Parameters
----------
matrix
filter_starts
decomp_level
Returns
-------
"""
exp = filter_starts[0]
low = matrix[: exp ** 2].reshape((exp, exp, matrix.shape[-1]))
low = low.permute(2, 0, 1).unsqueeze(0)
highs = []
last_end = exp ** 2
for lvl in range(decomp_level):
exp = filter_starts[lvl]
lp_list = [None, None, None]
for i in range(1, 4):
next_end = last_end + exp ** 2
lp_list[i - 1] = (
matrix[last_end:next_end]
.reshape((exp, exp, matrix.shape[-1]))
.permute(2, 0, 1)
.unsqueeze(0)
.unsqueeze(2)
)
last_end = next_end
highs.append(torch.cat(lp_list, dim=2))
highs.reverse()
return low, highs
|
10411e774dc654586cd9b88b40e405b695a12919
| 3,637,080
|
def minpoly(firstterms):
"""
    Return the minimal polynomial, of degree at most n, of the
    linearly recurrent sequence whose first 2n terms are given.
"""
field = ring.getRing(firstterms[0])
r_0 = uniutil.polynomial({len(firstterms):field.one}, field)
r_1 = uniutil.polynomial(enumerate(reversed(firstterms)), field)
poly_ring = r_0.getRing()
v_0 = poly_ring.zero
v_1 = poly_ring.one
n = len(firstterms) // 2
while n <= r_1.degree():
q, r = divmod(r_0, r_1)
v_0, v_1 = v_1, v_0 - q*v_1
r_0, r_1 = r_1, r
return v_1.scalar_mul(v_1.leading_coefficient().inverse())
|
8cad899aa40859884b4cdbe01b0734de84782804
| 3,637,081
|
import tensorflow as tf
def scale_gradient(tensor, scale):
    """Scales the gradient for the backward pass."""
    return tf.add(tensor * scale, tf.stop_gradient(tensor) * (1 - scale))
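# Note on the identity above: the forward value is scale * t + (1 - scale) * t == t
# (unchanged), while the stop_gradient term contributes nothing in the backward
# pass, so incoming gradients are effectively multiplied by `scale`.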
|
e3ea3a7baf06ebab5de0510ea13260e89b9397ca
| 3,637,082
|
import ast
import random
def t_rename_local_variables(the_ast, all_sites=False):
"""
Local variables get replaced by holes.
"""
changed = False
candidates = []
for node in ast.walk(the_ast):
if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
if node.id not in [ c.id for c in candidates ]:
# print(node.id, node.lineno)
candidates.append(node)
if len(candidates) == 0:
return False, the_ast
if not all_sites:
selected = [random.choice(candidates)]
else:
selected = candidates
local_var_defs = {}
for cnt, s in enumerate(selected, start=1):
local_var_defs[s.id] = cnt
to_rename = []
for node in ast.walk(the_ast):
if isinstance(node, ast.Name) and node.id in local_var_defs:
to_rename.append((node, local_var_defs[node.id]))
for node, idx in to_rename:
changed = True
node.id = 'VAR' + str(idx)
return changed, the_ast
|
8faeea81faac55d5d45b897776cd87cb508404a5
| 3,637,084
|
from typing import List
def get_scale(notes: List[str]) -> int:
"""Convert a list of notes to a scale constant.
# Args
- *notes*: list of notes in the scale. This should be a list of string
where each string is a note ABC notation. Sharps should be
represented with a pound sign preceding the note e.g. '#A' and flats
should be represented with a lower case b preceding the note e.g. 'bB'.
# Returns
An integer mask used to represent a musical key or scale as an argument to
any of the MusicalHash methods.
# Raises
A ValueError if an invalid string is included in the input list.
"""
note_map = {'A': 0x1,
'#A': 0x2, 'bB': 0x2,
'B': 0x4,
'C': 0x8,
'#C': 0x10, 'bD': 0x10,
'D': 0x20,
'#D': 0x40, 'bE': 0x40,
'E': 0x80,
'F': 0x100,
'#F': 0x200, 'bG': 0x200,
'G': 0x400,
'#G': 0x800, 'bA': 0x800}
scale = 0x0
for note in notes:
try:
scale |= note_map[note]
except KeyError:
raise ValueError(
'The string {} is not a valid musical note'.format(note))
return scale
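# Worked example: an A major triad (A, #C, E) maps to the bitmask
# 0x1 | 0x10 | 0x80 == 0x91 (decimal 145).
#
#     get_scale(['A', '#C', 'E'])  # -> 145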
|
91cbcc7bfa05df52adf741b85f78beeabf819966
| 3,637,085
|
import math
def slurm_format_bytes_ceil(n):
""" Format bytes as text.
SLURM expects KiB, MiB or Gib, but names it KB, MB, GB. SLURM does not handle Bytes, only starts at KB.
>>> slurm_format_bytes_ceil(1)
'1K'
>>> slurm_format_bytes_ceil(1234)
'2K'
>>> slurm_format_bytes_ceil(12345678)
'13M'
>>> slurm_format_bytes_ceil(1234567890)
'2G'
>>> slurm_format_bytes_ceil(15000000000)
'14G'
"""
if n >= (1024 ** 3):
return "%dG" % math.ceil(n / (1024 ** 3))
if n >= (1024 ** 2):
return "%dM" % math.ceil(n / (1024 ** 2))
if n >= 1024:
return "%dK" % math.ceil(n / 1024)
return "1K" % n
|
ce48c778b9605105ed9b66a55d27796fb90499cc
| 3,637,086
|
def factory_payment_account(corp_number: str = 'CP0001234', corp_type_code: str = 'CP',
payment_system_code: str = 'PAYBC'):
"""Factory."""
return PaymentAccount(
corp_number=corp_number,
corp_type_code=corp_type_code,
payment_system_code=payment_system_code,
party_number='11111',
account_number='4101',
site_number='29921',
)
|
896fe2ac0162455c4da97bd629d0e3f2d9b2a1e2
| 3,637,087
|
def foo():
"""多参数函数的传参书写格式, 和类实例化的格式"""
ret = foo_long(a=1, b=2, c=3, d=4,
e=5, f=6, g=7, h=8)
    # class instantiation: format for passing multiple arguments
object_ = ClassName(
a=1, b=2, c=3, d=4,
e=5, f=6, g=7, h=8
)
return ret
|
4571ef723cab1601acfa01eb0765eaf8002df2e0
| 3,637,089
|
def posture_seq(directory,postures,sampling_fraction):
"""posture_seq grabs samples locomotion files from a directory and
converts them to strings of posture_sequences
Input:
directory = the directory containing locomotion files
postures = the mat file or numpy array of template postures
sampling_fraction = the fraction of files you want to sample
Output:
all_postures = a list of posture_sequences(of type string)
"""
    num_postures = len(postures)
    angle_data = loading_data(directory, sampling_fraction)[0]
    all_postures = []
    i = 0
while i < len(angle_data):
if len(angle_data[i][1]) > 1000:
#get angles for the skeletons
angles, m_a = angle_data[i]
#X, Y = MA2skel(angles, m_a, 1)
#initialize Vars and posture_sequence:
#Vars = np.zeros(len(X))
posture_sequence = ''
            for k in range(len(angles)):
                distances = [np.inf]*num_postures
                for j in range(num_postures):
                    distances[j] = np.linalg.norm(angles[k] - postures[:, j])
                val = min(distances)
                #angle_err[k] = val
                ind = distances.index(val)
                #Vars[k] = np.corrcoef(angles[k], postures[:, ind])[0][1]**2
                posture_sequence = posture_sequence + ' ' + str(ind)
all_postures.append(posture_sequence)
i+=1
else:
i+=1
return all_postures
|
7e1554f85dfc68b293c9db5a5db3aa5bd6414bff
| 3,637,090
|
from ._finite_differences import _window1d, _lincomb
import torch
def membrane_diag(voxel_size=1, bound='dct2', dim=None, weights=None):
"""Diagonal of the membrane regulariser.
If no weight map is provided, the diagonal of the membrane regulariser
is a scaled identity with scale `2 * alpha`, where
`alpha = vx.reciprocal().square().sum()`
    However, if a weight map is provided, the diagonal of the regulariser
is a convolved version of the weight map. In 2D, the convolution kernel
has a first order "diamond" shape:
b0
b1 a b1
b0
Parameters
----------
weights : (..., *spatial) tensor
Weights from the reweighted least squares scheme
voxel_size : float or sequence[float], default=1
Voxel size
bound : str, default='dct2'
Boundary condition.
dim : int, optional
Number of spatial dimensions.
Default: from voxel_size
Returns
-------
diag : () or (..., *spatial) tensor
Convolved weight map if provided.
Else, central convolution weight.
"""
vx = core.utils.make_vector(voxel_size)
if dim is None:
dim = len(vx)
vx = core.utils.make_vector(vx, dim)
if weights is not None:
weights = torch.as_tensor(weights)
backend = dict(dtype=weights.dtype, device=weights.device)
# move spatial dimensions to the front
spdim = list(range(weights.dim() - dim, weights.dim()))
weights = core.utils.movedim(weights, spdim, list(range(dim)))
else:
backend = dict(dtype=vx.dtype, device=vx.device)
vx = vx.to(**backend)
vx = vx.square().reciprocal()
if weights is None:
return 2 * vx.sum()
values = [[weights]]
dims = [None] + [d for d in range(dim) for _ in range(2)]
kernel = [2 * vx.sum()]
for d in range(dim):
values.extend(_window1d(weights, d, [-1, 1], bound=bound))
kernel += [vx[d], vx[d]]
weights = _lincomb(values, kernel, dims, ref=weights)
# send spatial dimensions to the back
weights = core.utils.movedim(weights, list(range(dim)), spdim)
return weights
|
3329c43aa5ae025a14660e1ddd4c1f658740e1d4
| 3,637,091
|
def get_groups(parsed, store, conf):
"""
Return groups based on argument provided
:param Namespace parsed: arguments parsed
:param store: Otter scaling group collection
:param dict conf: config
:return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
"""
log = mock_log()
if parsed.group:
groups = [g.split(":") for g in parsed.group]
return succeed(
[{"tenantId": tid, "groupId": gid} for tid, gid in groups])
elif parsed.all:
d = store.get_all_valid_groups()
elif parsed.tenant_id:
d = get_groups_of_tenants(log, store, parsed.tenant_id)
elif parsed.disabled_tenants:
non_conv_tenants = conf["non-convergence-tenants"]
d = store.get_all_valid_groups()
d.addCallback(
filter(lambda g: g["tenantId"] not in set(non_conv_tenants)))
d.addCallback(list)
elif parsed.conf_conv_tenants:
d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
else:
raise SystemExit("Unexpected group selection")
return d
|
0441863984173236b09b50987c6f22838679a497
| 3,637,093
|
import json
def get_content_details(site_code, release_uuid, content_type, content_key):
""" get_content_details """
publisher_api = PublisherAPI()
content_release = None
try:
if release_uuid:
# get ContentRelease
content_release = WSSPContentRelease.objects.get(
site_code=site_code,
uuid=release_uuid,
)
else:
# get live ContentRelease
response = publisher_api.get_live_content_release(site_code)
if response['status'] == 'error':
return response
else:
release = response['content']
content_release = WSSPContentRelease.objects.get(id=release.id)
release_uuid = content_release.uuid
except WSSPContentRelease.DoesNotExist:
pass
# Fetch document from the content release.
response = publisher_api.get_document_from_content_release(
site_code,
release_uuid,
content_key,
content_type,
)
base_content_release = None
if response['status'] == 'error' and response['error_code'] == 'release_document_does_not_exist':
# Release doc not found, try in the base release for preview releases.
if content_release.status == 0:
if content_release.use_current_live_as_base_release:
response = publisher_api.get_live_content_release(site_code)
if response['status'] == 'success':
release = response['content']
base_content_release = WSSPContentRelease.objects.get(id=release.id)
else:
base_content_release = content_release.base_release
if base_content_release != None:
# Fetch document from the base content release if available (should only happen for preview releases).
response = publisher_api.get_document_from_content_release(
site_code,
base_content_release.uuid,
content_key,
content_type,
)
if response['status'] == 'success':
data = json.loads(response['content'].document_json)
response_extra = publisher_api.get_document_extra_from_content_release(
site_code,
release_uuid,
content_key,
content_type,
)
if response_extra['status'] == 'success':
try:
dynamic_element_keys = json.loads(response_extra['content'].get(key='dynamic_element_keys').content)
data, updated = document_load_dynamic_elements(content_release, data, dynamic_element_keys)
except:
pass
else:
return response
return data
|
f71a4e4584474e24cfb6d25aad2465538575cbdf
| 3,637,094
|
import numpy as np
import scipy.linalg
def _czt(x, M=None, W=None, A=1.0):
"""Calculate CZT (Stripped down to the basics)."""
# Unpack arguments
N = len(x)
if M is None:
M = N
if W is None:
W = np.exp(-2j * np.pi / M)
A = np.complex128(A)
W = np.complex128(W)
# CZT algorithm
k = np.arange(max(M, N))
Wk22 = W ** (-(k ** 2) / 2)
r = Wk22[:N]
c = Wk22[:M]
X = A ** -k[:N] * x / r
X = scipy.linalg.matmul_toeplitz((c, r), X)
X /= c
return X
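# Sanity-check sketch (uses only the imports above): with the default
# parameters (M=N, W=exp(-2j*pi/N), A=1) the chirp z-transform reduces to the
# plain DFT, so the result should match numpy's FFT.
#
#     x = np.random.randn(64) + 1j * np.random.randn(64)
#     np.allclose(_czt(x), np.fft.fft(x))  # -> True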
|
a0852eacd8d4e35e0c6e96cc59e8692d9d806c5d
| 3,637,095
|
from typing import Any
from typing import Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from chex import Array
def build_obs_act_forward_fc(
n_out: int,
depth: int,
hidden: int,
act_layer: Any,
last_layer: Optional[Any] = None,
) -> hk.Transformed:
"""Build a simple fully-connected forward step that takes an observation & an action.
Args:
n_out (int): Number of outputs.
depth (int): Depth of layers.
hidden (int): # of hidden units of fc.
act_layer (Any): Activation layer.
last_layer (Any): Last activation layer.
Returns:
hk.Transformed:
Takes [batch x ?] observation and [batch x ?] actions.
Returns [batch x n_out] Array.
"""
@jax.vmap
def forward(obs: Array, act: Array) -> Array:
# concat observation and action
chex.assert_equal_rank((obs, act))
obs_act = jnp.hstack((obs, act))
# set up layers
modules = []
if depth > 0:
modules.append(hk.Linear(hidden))
for _ in range(depth - 1):
modules += [act_layer, hk.Linear(hidden)]
modules += [act_layer, hk.Linear(n_out)]
else:
modules.append(hk.Linear(n_out))
if last_layer is not None:
modules.append(last_layer)
return hk.Sequential(modules)(obs_act.astype(float))
return hk.without_apply_rng(hk.transform(forward))
|
0d330910730ccf80213852aa7cd08950f09e6300
| 3,637,097
|
def update_nested(key, d, other):
"""Update *d[key]* with the *other* dictionary preserving data.
If *d* doesn't contain the *key*, it is updated with *{key: other}*.
If *d* contains the *key*, *d[key]* is inserted into *other[key]*
(so that it is not overriden).
If *other* contains *key* (and possibly more nested *key*-s),
then *d[key]* is inserted into the deepest level
of *other.key.key...* Finally, *d[key]* becomes *other*.
Example:
>>> context = {"variable": {"name": "x"}}
>>> new_var_context = {"name": "n"}
>>> update_nested("variable", context, copy.deepcopy(new_var_context))
>>> context == {'variable': {'name': 'n', 'variable': {'name': 'x'}}}
True
>>>
>>> update_nested("variable", context, {"name": "top"})
>>> context == {
... 'variable': {'name': 'top',
... 'variable': {'name': 'n', 'variable': {'name': 'x'}}}
... }
True
*other* is modified in general. Create that on the fly
or use *copy.deepcopy* when appropriate.
Recursive dictionaries (containing references to themselves)
are strongly discouraged and meaningless when nesting.
If *other[key]* is recursive, :exc:`.LenaValueError` may be raised.
"""
# there was an idea to add a keyword argument copy_other
# (by default True), but the user can do that him/herself
# with copy.deepcopy when needed. Otherwise it would be
# unnecessary complication of this interface.
# Only one key is nested. This encourages design when
# 1) elements combine their contexts into one key
# (like {"split_into_bins": {"variable": {}, "histogram": {}}})
# 2) elements change only one key ("variable", "histogram",...).
def get_most_nested_subdict_with(key, d):
nested_dicts = []
while True:
if key in d:
if d in nested_dicts:
raise lena.core.LenaValueError(
"recursive *other* is forbidden"
)
nested_dicts.append(d)
d = d[key]
else:
return d
if key in d:
other_most_nested = get_most_nested_subdict_with(key, other)
# insert d[key] at the lowest other.key.key....
other_most_nested[key] = d[key]
d[key] = other
|
efbbfd576652710c92939581c48e32edce1a956e
| 3,637,098
|
def quicksort(arr, low, high):
""" Quicksort function uses the partition helper function.
"""
if low < high:
pi = partition(arr, low, high)
quicksort(arr, low, pi-1)
quicksort(arr, pi+1, high)
return arr
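# The `partition` helper is not shown in this snippet; a minimal Lomuto-style
# sketch that matches the call above (partitions arr[low..high] around
# arr[high] in place and returns the final pivot index) could look like this:
def partition(arr, low, high):
    pivot = arr[high]
    i = low - 1
    for j in range(low, high):
        if arr[j] <= pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return i + 1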
|
aa51f8536f47f8529c2bda74ea96138062d939e7
| 3,637,099
|
def make_word_dict():
    """Read 'words.txt' and create a dictionary of words from it.
    """
    word_dict = dict()
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            word_dict[word] = ''
    return word_dict
|
a4213cf5ff246200c7a55a6d1525d6fd6067e31f
| 3,637,100
|
def voidobject(key_position: int, offset: int) -> HitObject:
"""
    Creates a HitObject with no judgement (single notes only) from the arguments.
    Arguments
    ----
    key_position : int
    -> key position; if counting from 1, pass the value looked up from key_asset
    offset : int
    -> offset value (where the note is placed)
    Returns
    ------
    HitObject
    -> HitObject instance for an empty note
"""
return HitObject(key_position, max_offset, True, end_offset=offset)
|
d7d47204bfb09592811fa85c4aa71e3e80bfa7bc
| 3,637,101
|
def mock_user_save():
"""Функция-пустышка для эмуляции исключения во время записи пользователя."""
def user_save(*args, **kwargs):
raise IntegrityError
return user_save
|
144ad41b9b9a2d477d622b6c2284c36514581ea1
| 3,637,102
|
def index():
"""首页"""
banners = Banner.query_used()
page = request.args.get("page", 1, type=int) # 指定的页码
per_page = current_app.config["MYZONE_ARTICLE_PER_PAGE"] # 每页的文章数
pagination = Article.query_order_by_createtime(page, per_page=per_page) # 创建分页器对象
articles = pagination.items # 从分页器中获取查询结果
categories = Category.query_all()
tags = Tag.query_all()
return render_template(
"main/index.html",
pagination=pagination,
articles=articles,
categories=categories,
tags=tags,
timestamp_to_strftime=timestamp_to_str,
func_id=0,
banners=banners,
)
|
ba3f6a558e4edb60025ef01832bb5ff5a1fb7f7a
| 3,637,103
|
def create_temporal_vis(ldf, col):
"""
Creates and populates Vis objects for different timescales in the provided temporal column.
Parameters
----------
ldf : lux.core.frame
LuxDataFrame with underspecified intent.
col : str
Name of temporal column.
Returns
-------
vlist : [Vis]
Collection of Vis objects.
"""
formatted_date = pd.to_datetime(ldf[col], format="%Y-%m-%d")
overall_vis = Vis([lux.Clause(col, data_type="temporal")], source=ldf, score=5)
year_col = col + " (year)"
year_df = LuxDataFrame({year_col: pd.to_datetime(formatted_date.dt.year, format="%Y")})
year_vis = Vis([lux.Clause(year_col, data_type="temporal")], source=year_df, score=4)
month_col = col + " (month)"
month_df = LuxDataFrame({month_col: formatted_date.dt.month})
month_vis = Vis(
[lux.Clause(month_col, data_type="temporal", timescale="month")], source=month_df, score=3
)
day_col = col + " (day)"
day_df = LuxDataFrame({day_col: formatted_date.dt.day})
day_df.set_data_type(
{day_col: "nominal"}
) # Since day is high cardinality 1-31, it can get recognized as quantitative
day_vis = Vis([lux.Clause(day_col, data_type="temporal", timescale="day")], source=day_df, score=2)
week_col = col + " (day of week)"
week_df = lux.LuxDataFrame({week_col: formatted_date.dt.dayofweek})
week_vis = Vis(
[lux.Clause(week_col, data_type="temporal", timescale="day of week")], source=week_df, score=1
)
unique_year_values = len(year_df[year_col].unique())
unique_month_values = len(month_df[month_col].unique())
unique_week_values = len(week_df[week_col].unique())
vlist = []
vlist.append(overall_vis)
if unique_year_values != 1:
vlist.append(year_vis)
if unique_month_values != 1:
vlist.append(month_vis)
if unique_week_values != 1:
vlist.append(week_vis)
return vlist
|
9a52600c1aac10a76b85b63c2879341dcc14b415
| 3,637,104
|
def num_neighbours(skel) -> np.ndarray:
"""Computes the number of neighbours of each skeleton pixel.
Parameters
----------
skel : (H, W) array_like
Input skeleton image.
Returns
-------
(H, W) array_like
Array containing the numbers of neighbours at each skeleton pixel and 0 elsewhere.
"""
skel = np.asarray(skel, dtype=int)
return filters.convolve(skel, _NB_MASK, mode='constant') * skel
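# Note: assuming `_NB_MASK` is the 3x3 kernel of ones with a zero centre, the
# convolution counts each pixel's 8-connected skeleton neighbours, and the
# final multiplication by `skel` zeroes out non-skeleton pixels.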
|
aad9f1de0f192777ebc41e603cd6ac47aa3cd49f
| 3,637,106
|
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
"""Makes a fake Subject.
If prevalences is provided, n and conc are ignored.
n: number of species
conc: concentration parameter
num_reads: number of reads
prevalences: numpy array of prevalences (overrides n and conc)
"""
# generate random prevalences
if prevalences is None:
dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
prevalences = dirichlet.Random()
prevalences.sort()
# generate a simulated sample
pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
cdf = pmf.MakeCdf()
sample = cdf.Sample(num_reads)
# collect the species counts
hist = thinkbayes2.Hist(sample)
# extract the data
data = [count for species, count in hist.Items()]
data.sort()
# make a Subject and process
subject = Subject('simulated')
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
|
91230288344c55cd4417175560ec7b3e714d9f98
| 3,637,107
|
from datetime import datetime
import pandas as pd
import pytz
def build_results_candidate_people():
"""
Return DataFrame containing results, candidates, and people joined
"""
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
results = pd.read_csv('data/results.csv')
results_candidates = pd.merge(
results #[['candidate_id', 'person_id', 'smd_id']]
, candidates #[['candidate_id']]
, how='left'
, on=['candidate_id', 'smd_id']
)
rcp = pd.merge(results_candidates, people, how='left', on='person_id') # results-candidates-people
# Determine who were incumbent candidates at the time of the election
election_date = datetime(2020, 11, 3, tzinfo=pytz.timezone('America/New_York'))
commissioners = list_commissioners(status=None)
incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
incumbent_candidates['is_incumbent'] = True
rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)
    # Sort by SMD ascending, Votes descending
rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])
# Placeholder name for all write-in candidates.
# We do not know the combination of name and vote count for write-in candidates
# We only know the name of the write-in winners
rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
return rcp
|
5e330b026b3546e728f9a06df33eaf8fc429775c
| 3,637,108
|
def div(lhs: Value, rhs: Value) -> Value:
""" Divides `lhs` by `rhs`. """
return lhs.run() // rhs.run()
|
73cb05b536c94e56331054e92e7d9fb84f75fdb5
| 3,637,109
|
def get_seat_total_per_area(party_id: PartyID) -> dict[AreaID, int]:
"""Return the number of seats per area for that party."""
area_ids_and_seat_counts = db.session \
.query(
DbArea.id,
db.func.count(DbSeat.id)
) \
.filter_by(party_id=party_id) \
.outerjoin(DbSeat) \
.group_by(DbArea.id) \
.all()
return dict(area_ids_and_seat_counts)
|
35aced1f8e149a06f54ed43f41b80f796608316b
| 3,637,110
|
def toCamelCase(string: str):
"""
Converts a string to camel case
Parameters
----------
string: str
The string to convert
"""
string = str(string)
if string.isupper():
return string
split = string.split("_") # split by underscore
final_split = []
for s in split:
final_split.extend(s.split(" ")) # split by space
return "".join(l.capitalize() if index > 0 else l for index, l in enumerate(final_split))
|
5197ad3353f2e88ccf1dfca62aeae59260e016e7
| 3,637,111
|
def aggregate_testsuite(testsuite):
""" Compute aggregate results for a single test suite (ElemTree node)
:param testsuite: ElemTree XML node for a testsuite
:return: AggregateResult
"""
if testsuite is None:
return None
tests = int(testsuite.attrib.get('tests') or 0)
failures = int(testsuite.attrib.get('failures') or 0)
disabled = int(testsuite.attrib.get('disabled') or 0)
errors = int(testsuite.attrib.get('errors') or 0)
duration = float(testsuite.attrib.get('time') or 0.0)
success_rate = (tests - failures) / float(tests) if tests else 0.0
return AggregateResult(tests=tests, failures=failures, disabled=disabled, errors=errors, success_rate=success_rate,
duration=duration)
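
# Hedged usage sketch (not part of the original source). `AggregateResult` is
# defined elsewhere in the real module; a namedtuple with the same field names
# is used here purely as a hypothetical stand-in.
if __name__ == "__main__":
    import collections
    import xml.etree.ElementTree as ET
    AggregateResult = collections.namedtuple(
        "AggregateResult", "tests failures disabled errors success_rate duration")
    suite = ET.fromstring('<testsuite tests="4" failures="1" disabled="0" errors="0" time="2.5"/>')
    print(aggregate_testsuite(suite))
    # AggregateResult(tests=4, failures=1, disabled=0, errors=0, success_rate=0.75, duration=2.5)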
|
3b7ff5b353e0f6efffed673e1dcb463f00a0e708
| 3,637,112
|
def rowwidth(view, row):
"""Returns the number of characters of ``row`` in ``view``.
"""
return view.rowcol(view.line(view.text_point(row, 0)).end())[1]
|
f8db1bf6e3d512d1a2bd5eeb059af93e8ac3bc5f
| 3,637,113
|
import json
def dry_query(event, *args):
"""Handles running a dry query
Args:
url: dry_query?page&page_length&review_id
body:
search: search dict <wrapper/input_format.py>
Returns:
{
<wrapper/output_format.py>
}
"""
# try:
body = json.loads(event["body"])
search = body.get('search')
try:
page = int(event.get('queryStringParameters').get('page', 1))
except AttributeError:
page = 1
try:
page_length = int(
event.get('queryStringParameters').get('page_length', 50))
except AttributeError:
page_length = 50
results = slr.conduct_query(search, page, page_length)
# (optionally) mark previously persisted results
try:
review_id = event.get('queryStringParameters').get('review_id')
review = connector.get_review_by_id(review_id)
results = slr.results_persisted_in_db(results, review)
except AttributeError:
pass
return make_response(status_code=201, body=results)
# except Exception as e:
# return make_response(status_code=500, body={"error": e})
|
0c69da353d958e9628e31dce68fe6bcafd482f2c
| 3,637,115
|
def fixed_prior_to_measurements(coords, priors):
"""
Convert the fixed exchange and met conc priors to measurements.
"""
fixed_exchange = get_name_ordered_overlap(coords, "reaction_ind", ["exchange", "fixed_x_names"])
fixed_met_conc = get_name_ordered_overlap(coords, "metabolite_ind", ["metabolite", "fixed_x_names"])
prior_met_conc_fixed = extract_prior_2d("metabolite", priors, fixed_met_conc, coords["condition"],
DEFAULT_MET_CONC_MEAN, DEFAULT_MET_CONC_SCALE)
prior_exchange_fixed = extract_prior_2d("exchange", priors, fixed_exchange, coords["condition"],
DEFAULT_EXCHANGE_MEAN, DEFAULT_EXCHANGE_SCALE)
# Expand the IndPrior2d to the pandas dataframe format
fixed_met_prior_df = prior_met_conc_fixed.to_dataframe("mic").rename(
columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
fixed_exchange_prior_df = prior_exchange_fixed.to_dataframe("flux").rename(
columns={"parameter": "target_id", "loc": "measurement", "scale": "error_scale"})
return fixed_exchange_prior_df, fixed_met_prior_df
|
3dab3eddb5f785dd04bba4caddbc631a0cdfd187
| 3,637,116
|
def get_batch_size():
"""Returns the batch size tensor."""
return get_global_variable(GraphKeys.BATCH_SIZE)
|
4b030738c78fa5a06d27a2aee62f15ff3e6be347
| 3,637,117
|
def get_dataloader(config: ExperimentConfig, tfms: Tuple[List, List] = None):
""" get the dataloaders for training/validation """
if config.dim > 1:
# get data augmentation if not defined
train_tfms, valid_tfms = get_data_augmentation(config) if tfms is None else tfms
# check number of jobs requested and CPUs available
num_cpus = os.cpu_count()
if num_cpus < config.n_jobs:
logger.warning(f'Requested more workers than available (n_jobs={config.n_jobs}, # cpus={num_cpus}). '
f'Setting n_jobs={num_cpus}.')
config.n_jobs = num_cpus
# define dataset and split into training/validation set
use_nii_ds = config.ext is None or 'nii' in config.ext
dataset = MultimodalNiftiDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
preload=config.preload) if use_nii_ds else \
MultimodalImageDataset.setup_from_dir(config.source_dir, config.target_dir, Compose(train_tfms),
ext='*.' + config.ext, color=config.color, preload=config.preload)
logger.info(f'Number of training images: {len(dataset)}')
if config.valid_source_dir is not None and config.valid_target_dir is not None:
valid_dataset = MultimodalNiftiDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
Compose(valid_tfms),
preload=config.preload) if use_nii_ds else \
MultimodalImageDataset.setup_from_dir(config.valid_source_dir, config.valid_target_dir,
Compose(valid_tfms),
ext='*.' + config.ext, color=config.color, preload=config.preload)
logger.info(f'Number of validation images: {len(valid_dataset)}')
train_loader = DataLoader(dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
pin_memory=config.pin_memory, worker_init_fn=init_fn)
valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
pin_memory=config.pin_memory, worker_init_fn=init_fn)
else:
# setup training and validation set
num_train = len(dataset)
indices = list(range(num_train))
split = int(config.valid_split * num_train)
valid_idx = np.random.choice(indices, size=split, replace=False)
train_idx = list(set(indices) - set(valid_idx))
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# set up data loader for nifti images
train_loader = DataLoader(dataset, sampler=train_sampler, batch_size=config.batch_size,
num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
valid_loader = DataLoader(dataset, sampler=valid_sampler, batch_size=config.batch_size,
num_workers=config.n_jobs, pin_memory=config.pin_memory, worker_init_fn=init_fn)
else:
        try:
            from altdataset import CSVDataset
        except (ImportError, ModuleNotFoundError):
            raise SynthtorchError('Cannot use 1D ConvNet in CLI without the altdataset toolbox.')
train_dataset, valid_dataset = CSVDataset(config.source_dir[0]), CSVDataset(config.valid_source_dir[0])
train_loader = DataLoader(train_dataset, batch_size=config.batch_size, num_workers=config.n_jobs, shuffle=True,
pin_memory=config.pin_memory)
valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, num_workers=config.n_jobs,
pin_memory=config.pin_memory)
return train_loader, valid_loader
|
d314a0bf6f7c9707ce46127e06bc8c22183246f1
| 3,637,118
|
def retournerTas(x,numéro):
"""
    retournerTas(x, numéro) flips (reverses) the part of the pile x that
    starts at index numéro and returns the resulting pile.
"""
tasDuBas = x[:numéro]
tasDuHaut = x[numéro:]
tasDuHaut.reverse()
result = tasDuBas + tasDuHaut
# print(result)
return result
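
# Hedged usage sketch (not part of the original source): the segment of the
# pile from the given index to the top is reversed, pancake-flip style.
if __name__ == "__main__":
    print(retournerTas([1, 2, 3, 4, 5], 2))   # -> [1, 2, 5, 4, 3]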
|
579798cf5fe8bec02109bfd46c5a945faee1a42c
| 3,637,119
|
def nback(n, k, length):
"""Random n-back targets given n, number of digits k and sequence length"""
Xi = random_state.randint(k, size=length)
yi = np.zeros(length, dtype=int)
for t in range(n, length):
yi[t] = (Xi[t - n] == Xi[t])
return Xi, yi
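
# Hedged usage sketch (not part of the original source). The function reads a
# module-level `random_state`; a seeded numpy RandomState is assumed here as a
# stand-in for it.
if __name__ == "__main__":
    import numpy as np
    random_state = np.random.RandomState(0)   # hypothetical stand-in for the module global
    Xi, yi = nback(n=2, k=3, length=10)
    print(Xi)   # random digits in [0, 3)
    print(yi)   # yi[t] == 1 wherever Xi[t] repeats Xi[t - 2]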
|
37ec70fdc60104fc5a99c6ba13923a2e3d56f0a4
| 3,637,121
|
def makeStateVector(sys, start_time=0):
"""
Constructs the initial state vector recursively.
Parameters
----------
sys: inherits from control.InputOutputSystem
start_time: float
Returns
-------
list
"""
x_lst = []
if "InterconnectedSystem" in str(type(sys)):
for sub_sys in sys.syslist:
x_lst.extend(makeStateVector(sub_sys, start_time=start_time))
elif isinstance(sys, ctl.NonlinearIOSystem):
x_lst.extend(sys.makeStateSer().values)
else:
new_state = list(np.repeat(0, sys.nstates))
x_lst.extend(new_state)
result = [float(v) for v in x_lst]
return result
|
e184d476c9ba94d88ee462c95987cabc31e459d0
| 3,637,122
|
def make_random_tensors(spec_structure, batch_size = 2):
"""Create random inputs for tensor_spec (for unit testing).
Args:
spec_structure: A dict, (named)tuple, list or a hierarchy thereof filled by
TensorSpecs(subclasses).
batch_size: If None, we will have a flexible shape (None,) + shape. If <= 0
we will omit an explicit batch dimension and otherwise have a fixed
(batch_size,) + shape.
Returns:
    Equivalent structure as spec_structure, with each TensorSpec replaced by a
    random tensor of the corresponding shape and dtype.
"""
assert_valid_spec_structure(spec_structure)
def make_random(t):
maxval = 255 if t.dtype in [tf.uint8, tf.int32, tf.int64] else 1.0
dtype = tf.int32 if t.dtype == tf.uint8 else t.dtype
shape = tuple(t.shape.as_list())
if batch_size is None:
shape = (None,) + shape
    elif batch_size > 0:
shape = (batch_size,) + shape
r = tf.random_uniform(shape, maxval=maxval, dtype=dtype)
return tf.cast(r, t.dtype)
return nest.map_structure(make_random, spec_structure)
|
dd2569def0863b1e9722de9c6175e680353ccf56
| 3,637,123
|
def simulate(robot, task, opt_seed, thread_count, episode_count=1):
"""Run trajectory optimization for the robot on the given task, and return the
resulting input sequence and result."""
robot_init_pos, has_self_collision = presimulate(robot)
if has_self_collision:
return None, None # return None if there are collisions in design
def make_sim_fn(): # make a simulation environment
sim = rd.BulletSimulation(task.time_step)
task.add_terrain(sim)
# Rotate 180 degrees around the y axis, so the base points to the right
sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
return sim
main_sim = make_sim_fn() # initialise simulation
robot_idx = main_sim.find_robot_index(robot) # get robot index of current robot
dof_count = main_sim.get_robot_dof_count(robot_idx) # get number of DOF
if episode_count >= 2:
value_estimator = rd.FCValueEstimator(main_sim, robot_idx, 'cpu', 64, 3, 1)
else:
value_estimator = rd.NullValueEstimator()
input_sampler = rd.DefaultInputSampler()
objective_fn = task.get_objective_fn() # get objective function (dot product of robot motion)
replay_obs = np.zeros((value_estimator.get_observation_size(), 0))
replay_returns = np.zeros(0)
for episode_idx in range(episode_count):
optimizer = rd.MPPIOptimizer(1.0, task.discount_factor, dof_count,
task.interval, task.horizon, 512,
thread_count, opt_seed + episode_idx,
make_sim_fn, objective_fn, value_estimator,
input_sampler)
optimizer.update() # run simulations to estimate values of final states
optimizer.set_sample_count(64) # decrease sample count
main_sim.save_state() # save simulation state
input_sequence = np.zeros((dof_count, task.episode_len))
obs = np.zeros((value_estimator.get_observation_size(),
task.episode_len + 1), order='f')
rewards = np.zeros(task.episode_len * task.interval)
for j in range(task.episode_len): # for length of episode
optimizer.update() # run simulation to estimate values of final states and update input sequence
input_sequence[:,j] = optimizer.input_sequence[:,0] # get input sequence??
optimizer.advance(1) # advance the robot(s) 1 step in the simulation
value_estimator.get_observation(main_sim, obs[:,j]) # ??
for k in range(task.interval): # for length of interval
main_sim.set_joint_targets(robot_idx, # set joint targets for each joint
input_sequence[:,j].reshape(-1, 1))
task.add_noise(main_sim, j * task.interval + k) # add noise to the force and torque of each joint
main_sim.step() # move the robot one step in the simulation
rewards[j * task.interval + k] = objective_fn(main_sim) # update the reward from return value of objective function
value_estimator.get_observation(main_sim, obs[:,-1]) # ??
main_sim.restore_state() # restore previously saved state
# Only train the value estimator if there will be another episode
if episode_idx < episode_count - 1:
returns = np.zeros(task.episode_len + 1)
# Bootstrap returns with value estimator
value_estimator.estimate_value(obs[:,task.episode_len], returns[-1:])
for j in reversed(range(task.episode_len)):
interval_reward = np.sum(
rewards[j * task.interval:(j + 1) * task.interval])
returns[j] = interval_reward + task.discount_factor * returns[j + 1]
replay_obs = np.hstack((replay_obs, obs[:,:task.episode_len]))
replay_returns = np.concatenate((replay_returns,
returns[:task.episode_len]))
value_estimator.train(replay_obs, replay_returns)
return input_sequence, np.mean(rewards) # return the stepping sequence and average reward
|
13c069282636e7b4215654d958621ed418bc40a8
| 3,637,124
|
import time
def config_worker():
"""
Enable worker functionality for AIO system.
:return: True if worker-config-complete is executed
"""
if utils.get_system_type() == si_const.TIS_AIO_BUILD:
console_log("Applying worker manifests for {}. "
"Node will reboot on completion."
.format(utils.get_controller_hostname()))
sysinv.do_worker_config_complete(utils.get_controller_hostname())
time.sleep(30)
# worker-config-complete has no logs to console. So, wait
# for some time before showing the login prompt.
for i in range(1, 10):
console_log("worker-config in progress..")
time.sleep(30)
console_log("Timed out on do_worker_config_complete")
raise CloneFail("Timed out on do_worker_config_complete")
return True
else:
# worker_config_complete is not needed.
return False
|
4ab82a2988a70ec9fe2f2ab6aa45099b7237b07a
| 3,637,125
|
def convert_dict_to_df(dict_data: dict):
"""
This method is used to convert dictionary data to pandas data frame
:param dict_data:
:return:
"""
# create df using dict
dict_data_df = pd.DataFrame.from_dict([dict_data])
# return the converted df
return dict_data_df
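
# Hedged usage sketch (not part of the original source); assumes pandas is
# imported as pd at module level, as the function requires.
if __name__ == "__main__":
    import pandas as pd
    df = convert_dict_to_df({"name": "alice", "score": 42})
    print(df)   # a single-row DataFrame with columns "name" and "score"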
|
550e33b0b3bacbdfb3abeb8019296be2c647000e
| 3,637,126
|
def sec2msec(sec):
"""Convert `sec` to milliseconds."""
return int(sec * 1000)
|
f1b3c0bf60ab56615ed93f295e7716e56c6a1117
| 3,637,127
|
import aiohttp
async def _request(session:aiohttp.ClientSession, url:str, headers:dict[str,str]) -> str:
"""
    Fetch the wishlist page for a single URL.
"""
async with session.get(url=url, headers=headers, proxy=PROXY) as resp:
try:
text = await resp.text()
except Exception as err:
text = ""
            logger.error(f'Error while requesting the wishlist: {err}')
return text
|
f891736d4598adc0005c096e12ab43d41544ab36
| 3,637,128
|
def get_pretrained_i2v(name, model_dir=MODEL_DIR):
"""
Parameters
----------
    name: str
        Name of the pretrained model; must be one of the keys in MODELS.
    model_dir: str
        Directory where the pretrained model files are stored or downloaded to.
Returns
-------
i2v model: I2V
"""
if name not in MODELS:
raise KeyError(
"Unknown model name %s, use one of the provided models: %s" % (name, ", ".join(MODELS.keys()))
)
_class, *params = MODELS[name]
return _class.from_pretrained(*params, model_dir=model_dir)
|
75657f039763ae73219eae900061a426ed2b11fd
| 3,637,129
|
def object_get_HostChilds(obj):
"""Return List of Objects that have set Host(s) to this object."""
# source:
# FreeCAD/src/Mod/Arch/ArchComponent.py
# https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/ArchComponent.py#L1109
# def getHosts(self,obj)
hosts = []
for link in obj.InListRecursive:
if hasattr(link, "Host"):
if link.Host:
if link.Host == obj:
hosts.append(link)
elif hasattr(link, "Hosts"):
if link.Hosts:
if obj in link.Hosts:
hosts.append(link)
return hosts
|
dccba2ef151207ebaa42728ee1395e1b0ec48e7d
| 3,637,130
|
import torch
def collate_fn(batch):
"""
Collate function for combining Hdf5Dataset returns
:param batch: list
List of items in a batch
:return: tuple
Tuple of items to return
"""
# batch is a list of items
numEntries = [];
allTensors = [];
allLabels = [];
for item in batch:
assert(len(item) % 2 == 0), "Both labels and tensors are expected";
numEntries.append(len(item) // 2);
allTensors.extend(item[: len(item) // 2]);
allLabels.extend(item[len(item) // 2:]);
# Determine how much to pad each tensor to and pad it; always pad on the right side
maxLength = max([t.shape[-1] for t in allTensors]);
newAllTensors = [];
paddings = [];
for t in allTensors:
numTotalPad = maxLength - t.shape[-1];
if numTotalPad > 0:
pad = (0, numTotalPad);
t = torch.nn.functional.pad(t, pad);
paddings.append(numTotalPad)
else:
paddings.append(0);
newAllTensors.append(t);
allTensors = torch.stack(newAllTensors, dim=0);
allLabels = torch.Tensor(allLabels);
numEntries = torch.LongTensor(numEntries);
allPaddings = torch.LongTensor(paddings);
return allTensors, allLabels, allPaddings, numEntries;
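
# Hedged usage sketch (not part of the original source): each batch item packs
# its tensors in the first half and the matching labels in the second half.
if __name__ == "__main__":
    batch = [
        (torch.zeros(3), torch.ones(5), 0.0, 1.0),   # two tensors, two labels
        (torch.ones(4), 1.0),                        # one tensor, one label
    ]
    tensors, labels, paddings, num_entries = collate_fn(batch)
    print(tensors.shape)   # torch.Size([3, 5]) - every tensor padded to the longest length
    print(paddings)        # tensor([2, 0, 1])
    print(num_entries)     # tensor([2, 1])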
|
b49ec88b4de844787d24140f5ef99ad9a573c6e3
| 3,637,131
|
def test_psf_estimation(psf_data, true_psf_file, kernel=None, metric='mean'):
"""Test PSF Estimation
This method tests the quality of the estimated PSFs
Parameters
----------
psf_data : np.ndarray
Estimated PSFs, 3D array
true_psf_file : str
True PSFs file name
kernel : int, optional
Standard deviation of Gaussian kernel
metric : str {mean, median}, optional
Metric for averaging results (default is 'mean')
Returns
-------
np.ndarray pixel errors, np.ndarray ellipticity errors
Raises
------
ValueError
        If the shape of the true PSF images does not match the shape of the
        estimated PSF images
"""
true_psf = read_file(true_psf_file)
if true_psf.shape != psf_data.shape:
raise ValueError('The number of true PSF images must match the number '
'estimated PSF images.')
return test_images(psf_data, true_psf, kernel, metric)
|
10feef6a483cfa6345561dcf5d1717a466a78c7d
| 3,637,132
|
def EulerBack(V_m0,n_0,m_0,h_0,T,opcion,t1,t2,t3,t4,I1,I2,h_res=0.01):
"""
    :param V_m0: Initial membrane potential
    :param n_0: Initial probability of n
    :param m_0: Initial probability of m
    :param h_0: Initial probability of h
    :param T: Temperature specified by the user
    :param opcion: * 1: If the current is fixed. * 2: If the current is variable.
    :param t1: [ms] Start of time interval 1.
    :param t2: [ms] End of time interval 1.
    :param t3: [ms] Start of time interval 2.
    :param t4: [ms] End of time interval 2.
    :param I1: [mV] Current intensity during time interval 1.
    :param I2: [mV] Current intensity during time interval 2.
    :param h_res: [ms] Time resolution (step) used to build the time range. Default = 0.01 [ms]
    :return: Tuple [t, Vm_EulerBack] -> t: Simulation time vector.
             Vm_EulerBack: Membrane potential at each time t of the simulation.
"""
    phi_val = phi(T) # Compute the temperature factor (Φ)
    t, I = tiempo_y_corriente(opcion,t1,t2,t3,t4,I1,I2,h_res) # Build the simulation time and current arrays
    # Create the vectors that will store the solutions (estimates) of Vm(t), n(t), m(t) and h(t) at each iteration.
    Vm_EulerBack, n_EulerBack, m_EulerBack, h_EulerBack = creacionArreglos(V_m0,n_0,m_0,h_0, t)
    # The system of equations defined in FAux_EulerBack is solved with fsolve to find the
    # roots of the model.
for iter in range(1, len(t)):
BackRoots = opt.fsolve(FAux_EulerBack, np.array([Vm_EulerBack[iter - 1],
n_EulerBack[iter - 1],
m_EulerBack[iter - 1],
h_EulerBack[iter - 1]]),
(I[iter], Vm_EulerBack[iter - 1], n_EulerBack[iter - 1], m_EulerBack[iter - 1],
h_EulerBack[iter - 1], phi_val, h_res))
        # Extract the solution vectors from each column of the roots matrix.
Vm_EulerBack[iter] = BackRoots[0]
n_EulerBack[iter] = BackRoots[1]
m_EulerBack[iter] = BackRoots[2]
h_EulerBack[iter] = BackRoots[3]
return t, Vm_EulerBack
|
33660894f80d3060206da3ddbb96d40b8453fc72
| 3,637,133
|
def wiggle(shape, scope, offset, seed=0):
"""Shift points/contours/paths by a random amount."""
if shape is None: return None
functions = { "points": wiggle_points,
"contours": wiggle_contours,
"paths": wiggle_paths}
fn = functions.get(scope)
if fn is None: return None
return fn(shape, offset, seed)
|
0cd587646013810ca512de5d327c2fdc24b110f5
| 3,637,134
|
def parseAndDisplay(line, indentLevel):
"""Indents lines."""
if line.startswith("starting "):
printArgumentLine(indentLevel, line)
indentLevel += 1
elif line.startswith("ending "):
indentLevel -= 1
printArgumentLine(indentLevel, line)
else:
printLine(indentLevel, line)
return indentLevel
|
14c9ebe27140aa77f5f7980e1da2bec30e7ccf8b
| 3,637,135
|
def insert_question(question):
"""
Insert a particular question
@param: question - JSON object containing question data to be inserted
"""
return db.questions.insert_one(question)
|
f4d22a137a1e7d9fbe43a1e03414d551cceb27c9
| 3,637,136
|
def sequence_vectorize(train_texts, val_texts):
"""Vectorizes texts as sequence vectors.
1 text = 1 sequence vector with fixed length.
# Arguments
train_texts: list, training text strings.
val_texts: list, validation text strings.
# Returns
x_train, x_val, word_index: vectorized training and validation
texts and word index dictionary.
"""
# Create vocabulary with training texts.
tokenizer = text.Tokenizer(num_words=TOP_K)
tokenizer.fit_on_texts(train_texts)
# Vectorize training and validation texts.
x_train = tokenizer.texts_to_sequences(train_texts)
x_val = tokenizer.texts_to_sequences(val_texts)
# Get max sequence length.
max_length = len(max(x_train, key=len))
if max_length > MAX_SEQUENCE_LENGTH:
max_length = MAX_SEQUENCE_LENGTH
# Fix sequence length to max value. Sequences shorter than the length are
# padded in the beginning and sequences longer are truncated
# at the beginning.
x_train = sequence.pad_sequences(x_train, maxlen=max_length)
x_val = sequence.pad_sequences(x_val, maxlen=max_length)
return x_train, x_val, tokenizer.word_index
|
f32c40ca2f8bc6d2c78f8093ccf94fee192b87c8
| 3,637,137
|
def parse_preferences(file, preferences):
"""Parse preferences to the dictionary."""
for line in open(file, "r").readlines():
# all lower case
line = line.lower()
# ignore comment lines
if line[0] == "!" or line[0] == "#" or not line.split():
continue
key = line.split(":")[0].strip()
value = line.split(":")[1].strip()
value = check(key, value)
add_preference(key, value)
return preferences
|
09c0251cd34cfbb6c9342eccd697a08259c744c6
| 3,637,138
|
def func_hex2str(*args):
"""字符串 -> Hex"""
return func_hex2byte(*args).decode('utf-8')
|
732f333cd942ecd8bee4ac4b974f0301e0c69baf
| 3,637,139
|
import collections
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
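
# Hedged usage sketch (not part of the original source): write a tiny vocab
# file to a temporary location and load it back.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as f:
        f.write("[PAD]\n[UNK]\nhello\nworld\n")
        vocab_path = f.name
    print(load_vocab(vocab_path))
    # OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('hello', 2), ('world', 3)])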
|
801833664a67e5d6e62dfb5379cabeb1b1b5058c
| 3,637,141
|
from typing import List
def triage(routes: List[Route]) -> Route:
"""
    Determine which route to use when several match: the route whose path
    has the most literal (non-regex-group) segments wins.
"""
eva = {}
for i, route in enumerate(routes):
stored_route: StoredRoute = route.pop("stored_route")
reg_path = stored_route["path"]
segments = [s for s in reg_path.split("/") if s]
eva[i] = len([seg for seg in segments if not seg.startswith("(?P") and not seg.endswith(")")])
dt = {v: routes[k] for k, v in eva.items()}
return dt[max(dt)]
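
# Hedged usage sketch (not part of the original source). Route and StoredRoute
# are typed elsewhere in the real module; plain dicts with the same keys are
# used here purely for illustration.
if __name__ == "__main__":
    routes = [
        {"stored_route": {"path": r"/users/(?P<user_id>\d+)"}, "name": "detail"},
        {"stored_route": {"path": "/users/me"}, "name": "me"},
    ]
    print(triage(routes))   # {'name': 'me'} - the route with more literal segments wins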
|
625b143c3284526b71d21a7c0113e892df92ed3a
| 3,637,142
|
def upsert_object(data, cursor=None):
"""
Upsert an object in the repository.
"""
cursor = check_cursor(cursor)
data = _set_object_defaults(data, cursor)
cursor.execute('''
INSERT INTO objects (pid_id, namespace, state, owner, label, versioned,
log, created, modified)
VALUES (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s, %(label)s,
%(versioned)s, %(log)s, %(created)s, %(modified)s)
ON CONFLICT (pid_id, namespace) DO UPDATE
SET (pid_id, namespace, state, owner, label, versioned, log,
modified) = (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s,
%(label)s, %(versioned)s, %(log)s, %(modified)s)
RETURNING id
''', data)
logger.info("Upserted into namespace: %s with PID ID: %s.",
data['namespace'], data['pid_id'])
return cursor
|
de0de4a48bf4f1d846938e174bb5a5300dd49083
| 3,637,143
|
import torch
def sparsity_line(M,tol=1.0e-3,device='cpu'):
"""Get the line sparsity(%) of M
Attributes:
M: Tensor - the matrix.
tol: Scalar,optional - the threshold to select zeros.
device: device, cpu or gpu
Returns:
        sparsity: Scalar (%) - the sparsity of the matrix.
"""
if type(M) is not torch.Tensor:
M = torch.as_tensor(M,device=device)
M1 = torch.where(torch.abs(M)<tol,torch.zeros_like(M),M)
M1_sum = torch.sum(M1, 1)
nb_nonzero = len(M1_sum.nonzero())
return (1.0-nb_nonzero/M1.shape[0])*100
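
# Hedged usage sketch (not part of the original source); torch is already
# imported above, so nothing else is needed.
if __name__ == "__main__":
    M = torch.tensor([[1.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0],
                      [0.0, 2.0, 3.0]])
    print(sparsity_line(M))   # ~33.33: one of the three rows is entirely (near-)zero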
|
b8675a768c8686571d1f7709d89e3abeb5b56a80
| 3,637,144
|
def geospace(lat0, lon0, length, dx, strike):
""" returns a series of points in geographic coordinates"""
pts_a = []
npts = length // dx + 1
for idx in range(npts):
# convert to lat, lon
new = convert_local_idx_to_geo(idx, lat0, lon0, length, dx, strike)
pts_a.append(new)
return np.array(pts_a)
|
78a380b59768cf83eca8edba5f1e21a0b6b61636
| 3,637,145
|
def linearOutcomePrediction(zs, params_pred, scope=None):
"""
    Model for predicting outcomes from latent representations Z.
    zs = batch of z-vectors (encoder states, as a matrix),
    i.e. something like [z_0, z_1, z_2, ...].
"""
with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True):
coefficients, bias = params_pred
outcome_preds = tf.add(tf.matmul(zs, coefficients), bias)
return outcome_preds
|
3e92fe0c0d16d8565066216c1da96b6fdbeb8dc9
| 3,637,146
|
from datetime import datetime
import collections
def _check_flag_value(flag_value):
"""
Search for a given flag in a given blockette for the current record.
This is a utility function for set_flags_in_fixed_headers and is not
designed to be called by someone else.
This function checks for valid entries for a flag. A flag can be either
* ``bool`` value to be always True or False for all the records
* ``datetime`` or ``UTCDateTime`` value to add a single 'INSTANT' datation
(see below)
* ``dict`` to allow complex flag datation
    ** The dict keys may be the keyword INSTANT to mark arbitrarily short
duration flags, or the keyword DURATION to mark events that span across
time.
** The dict values are:
*** for the INSTANT value, a single UTCDateTime or datetime object, or a
list of these datation objects
*** for the DURATION value, either a list of
[start1, end1, start2, end2, ...] or a list of tuples
[(start1, end1), (start2, end2), ...]
This function then returns all datation events as a list of tuples
[(start1, end1), ...] to ease the work of _convert_flags_to_raw_byte. Bool
values are unchanged, instant events become a tuple
(event_date, event_date).
If the flag value is incorrect, a ValueError is raised with a (hopefully)
explicit enough message.
:type flag_value: bool or dict
:param flag_value: the flag value to check.
:return: corrected value of the flag.
:raises: If the flag is not the one expected, a ``ValueError`` is raised
"""
if isinstance(flag_value, bool):
# bool allowed
corrected_flag = flag_value
elif isinstance(flag_value, datetime) or \
isinstance(flag_value, UTCDateTime):
# A single instant value is allowed
utc_val = UTCDateTime(flag_value)
corrected_flag = [(utc_val, utc_val)]
elif isinstance(flag_value, collections.Mapping):
# dict allowed if it has the right format
corrected_flag = []
for flag_key in flag_value:
if flag_key == "INSTANT":
# Expected: list of UTCDateTime
inst_values = flag_value[flag_key]
if isinstance(inst_values, datetime) or \
isinstance(inst_values, UTCDateTime):
# Single value : ensure it's UTCDateTime and store it
utc_val = UTCDateTime(inst_values)
corrected_flag.append((utc_val, utc_val))
elif isinstance(inst_values, collections.Sequence):
# Several instant values : check their types
# and add each of them
for value in inst_values:
if isinstance(value, datetime) or \
isinstance(value, UTCDateTime):
utc_val = UTCDateTime(value)
corrected_flag.append((utc_val, utc_val))
else:
msg = "Unexpected type for flag duration " +\
"'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
else:
msg = "Unexpected type for flag duration 'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
elif flag_key == "DURATION":
# Expecting either a list of tuples (start, end) or
# a list of (start1, end1, start1, end1)
dur_values = flag_value[flag_key]
if isinstance(dur_values, collections.Sequence):
if len(dur_values) != 0:
# Check first item
if isinstance(dur_values[0], datetime) or \
isinstance(dur_values[0], UTCDateTime):
# List of [start1, end1, start2, end2, etc]
# Check len
if len(dur_values) % 2 != 0:
msg = "Expected even length of duration " +\
"values, got %s"
raise ValueError(msg % len(dur_values))
# Add values
duration_iter = iter(dur_values)
for value in duration_iter:
start = value
end = dur_values[dur_values.index(value) + 1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg % str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
# Check duration validity
start = UTCDateTime(start)
end = UTCDateTime(end)
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end of " +\
"duration after its start"
raise ValueError(msg)
next(duration_iter)
elif isinstance(dur_values[0], collections.Sequence):
# List of tuples (start, end)
for value in dur_values:
if not isinstance(value, collections.Sequence):
msg = "Incorrect type %s for flag duration"
raise ValueError(msg % str(type(value)))
elif len(value) != 2:
msg = "Incorrect len %s for flag duration"
raise ValueError(msg % len(value))
else:
start = value[0]
end = value[1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg %
str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end " +\
"of duration after its start"
raise ValueError(msg)
# Else: len(dur_values) == 0, empty duration list:
# do nothing
else:
msg = "Incorrect DURATION value: expected a list of " +\
"tuples (start, end), got %s"
raise ValueError(msg % str(type(dur_values)))
else:
msg = "Invalid key %s for flag value. One of " +\
"'INSTANT', 'DURATION' is expected."
raise ValueError(msg % flag_key)
else:
msg = "Invalid type %s for flag value. Allowed values " +\
"are bool or dict"
raise ValueError(msg % str(type(flag_value)))
return corrected_flag
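
# Hedged usage sketch (not part of the original source). The snippet relies on
# obspy's UTCDateTime (and the pre-3.10 collections.Mapping/Sequence aliases)
# being available at module level; the obspy import is added here for the example.
if __name__ == "__main__":
    from obspy import UTCDateTime
    print(_check_flag_value(True))                   # -> True
    print(_check_flag_value(datetime(2020, 1, 1)))   # -> one instant (start, start) UTCDateTime pair
    print(_check_flag_value({"DURATION": [(datetime(2020, 1, 1), datetime(2020, 1, 2))]}))
    # -> list with one (start, end) pair of UTCDateTime objects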
|
2e4da676ad7abf95aa157aaca5aae80975b893e2
| 3,637,147
|
def logout():
""" Logout a user """
session.pop('user_id', None)
session.pop('player_id', None)
return redirect(url_for('index'))
|
d7d375e28a3e432c42b845cccf0adecb37cf46e1
| 3,637,148
|
def get_available_gpus():
"""Returns a list of available GPU devices names. """
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == "GPU"]
|
9c62204fa1bdc8ad22fd56ecad14bde895a08ec6
| 3,637,149
|