| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import numpy as np
def stage_grid(
Dstg,
A,
dx_c,
tte,
min_Rins=None,
recamber=None,
stag=None,
resolution=1.
):
"""Generate an H-mesh for a turbine stage."""
    # Distribute the spacings between stator and rotor
dx_c = np.array([[dx_c[0], dx_c[1] / 2.0], [dx_c[1] / 2.0, dx_c[2]]])
# Streamwise grids for stator and rotor
x_c, ilte = streamwise_grid(dx_c, resolution=resolution)
x = [x_ci * Dstg.cx[0] for x_ci in x_c]
# Generate radial grid
Dr = np.array([Dstg.Dr[:2], Dstg.Dr[1:]])
r = merid_grid(x_c, Dstg.rm, Dr, resolution=resolution)
# Evaluate radial blade angles
r1 = r[0][ilte[0][0], :]
spf = (r1 - r1.min()) / r1.ptp()
chi = np.stack((Dstg.free_vortex_vane(spf), Dstg.free_vortex_blade(spf)))
# If recambering, then tweak the metal angles
    if recamber is not None:
dev = np.reshape(recamber, (2, 2, 1))
dev[1] *= -1 # Reverse direction of rotor angles
chi += dev
# Get sections (normalised by axial chord for now)
sect = [
geometry.radially_interpolate_section(
spf, chii, spf, tte, Ai, stag=stagi
)
for chii, Ai, stagi in zip(chi, A, stag)
]
# If we have asked for a minimum inscribed circle, confirm that the
# constraint is not violated
if min_Rins:
for i, row_sect in enumerate(sect):
for rad_sect in row_sect:
current_radius = geometry.largest_inscribed_circle(rad_sect.T)
if current_radius < min_Rins:
                    raise geometry.GeometryConstraintError(
                        "Row %d: thickness is too small to satisfy the minimum "
                        "inscribed circle constraint: %.3f < %.3f"
                        % (i, current_radius, min_Rins)
                    )
# Now we can do b2b grids
rt = [b2b_grid(*args, resolution=resolution) for args in zip(x, r, Dstg.s, Dstg.cx, sect)]
# Offset the rotor so it is downstream of stator
x[1] = x[1] + x[0][-1] - x[1][0]
# fig, ax = plt.subplots()
# ax.plot(x[0],rt[0][:,0,(0,-1)])
# ax.plot(x[1],rt[1][:,0,(0,-1)])
# ax.axis('equal')
# plt.savefig('sect.pdf')
# quit()
return x, r, rt, ilte
|
e19e07e61be0079559ee597475ce017f8f0a6189
| 3,646,130
|
def get_best_trial(trial_list, metric):
"""Retrieve the best trial."""
return max(trial_list, key=lambda trial: trial.last_result.get(metric, 0))
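# A minimal usage sketch with hypothetical trial stand-ins (any object exposing a
# `last_result` dict works); the trial with the highest value for `metric` wins,
# and trials missing the metric fall back to 0.
from types import SimpleNamespace

_trials = [
    SimpleNamespace(last_result={"accuracy": 0.81}),
    SimpleNamespace(last_result={"accuracy": 0.93}),
    SimpleNamespace(last_result={}),
]
assert get_best_trial(_trials, "accuracy").last_result["accuracy"] == 0.93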
|
c5ddbb9ad00cddaba857d0d0233f6452e6702552
| 3,646,131
|
def make_registry_metaclass(registry_store):
"""Return a new Registry metaclass."""
if not isinstance(registry_store, dict):
raise TypeError("'registry_store' argument must be a dict")
class Registry(type):
"""A metaclass that stores a reference to all registered classes."""
def __new__(mcs, class_name, base_classes, class_dict):
"""Create and returns a new instance of Registry.
The registry is a class named 'class_name' derived from 'base_classes'
that defines 'class_dict' as additional attributes.
The returned class is added to 'registry_store' using
class_dict["REGISTERED_NAME"] as the name, or 'class_name'
if the "REGISTERED_NAME" attribute isn't defined. If the
sentinel value 'LEAVE_UNREGISTERED' is specified as the
name, then the returned class isn't added to
'registry_store'.
The returned class will have the "REGISTERED_NAME" attribute
defined either as its associated key in 'registry_store' or
the 'LEAVE_UNREGISTERED' sentinel value.
"""
registered_name = class_dict.setdefault("REGISTERED_NAME", class_name)
cls = type.__new__(mcs, class_name, base_classes, class_dict)
if registered_name is not LEAVE_UNREGISTERED:
if registered_name in registry_store:
raise ValueError("The name %s is already registered; a different value for the"
" 'REGISTERED_NAME' attribute must be chosen" %
(registered_name))
registry_store[registered_name] = cls
return cls
return Registry
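# Hedged usage sketch. 'LEAVE_UNREGISTERED' is a module-level sentinel assumed by the
# factory above; a stand-in is defined here only to make the sketch self-contained.
LEAVE_UNREGISTERED = object()

_registry = {}
ExampleRegistry = make_registry_metaclass(_registry)

class Widget(object, metaclass=ExampleRegistry):
    REGISTERED_NAME = "widget"

class Gadget(object, metaclass=ExampleRegistry):
    pass  # falls back to the class name "Gadget"

assert _registry == {"widget": Widget, "Gadget": Gadget}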
|
c1c0426e4d47323ccd3ab80ff3917253858f1b0c
| 3,646,132
|
def bind11(reactant, max_helix = True):
"""
Returns a list of reaction pathways which can be produced by 1-1 binding
reactions of the argument complex. The 1-1 binding reaction is the
hybridization of two complementary unpaired domains within a single complex
to produce a single unpseudoknotted product complex.
"""
reactions = set()
structure = list(reactant.pair_table)
for (strand_index, strand) in enumerate(structure):
for (domain_index, domain) in enumerate(strand):
            # The domain must be currently unpaired to take part in binding
if structure[strand_index][domain_index] is not None :
continue
start_loc = (strand_index, domain_index)
# search (one direction) around the loop for an open domain that can be bound.
results = find_on_loop(reactant, start_loc, filter_bind11)
assert len(results) == len(find_on_loop(reactant, start_loc, filter_bind11, direction = -1))
for e, (invader, before, target, after) in enumerate(results):
if max_helix:
invader, before, target, after = zipper(
reactant, invader[0], before, target[0], after, filter_bind11)
results[e] = list(map(Loop, [invader, before, target, after]))
# build products
for (loc1s, before, loc2s, after) in results:
                # Sanity check: the binding domains must be pairwise complementary
                assert all(x == ~y for x, y in zip(loc1s.domains, loc2s.domains))
product = do_bind11(reactant, loc1s.domain_locs, loc2s.domain_locs)
reaction = PepperReaction([reactant], [product], 'bind11')
if reaction.rate_constant[0] is None:
reaction.rate_constant = (unimolecular_binding_rate(loc1s.dlength, before, after), '/s')
reactions.add(reaction)
return sorted(reactions)
|
45c74105e2f9733092aba3b55edd4dbaa8e9e26e
| 3,646,133
|
def get_all_movie_props(movies_set: pd.DataFrame, flag: int, file_path: str):
"""
Function that returns the data frame of all movie properties from dbpedia
:param movies_set: data set of movies with columns movie id and movie dbpedia uri
:param flag: 1 to generate the data frame from scratch and 0 to read from file
    :param file_path: file path to write to (flag == 1) or read from (otherwise)
:return: the data frame of all movie properties from dbpedia
"""
cols = ['movie_id', 'prop', 'obj']
if flag == 1:
all_movie_props = obtain_all_movie_props(movies_set, cols)
all_movie_props.to_csv(file_path, mode='w', header=False, index=False)
else:
all_movie_props = pd.read_csv(file_path, header=None)
all_movie_props.columns = cols
all_movie_props = all_movie_props.set_index(cols[0])
return all_movie_props
|
f3b85ce0d5b0e0fa8f28a2f3e8ee7d69c2002ee1
| 3,646,134
|
def convert_to_clocks(duration, f_sampling=200e6, rounding_period=None):
"""
convert a duration in seconds to an integer number of clocks
f_sampling: 200e6 is the CBox sampling frequency
"""
if rounding_period is not None:
duration = max(duration//rounding_period, 1)*rounding_period
clock_duration = int(duration*f_sampling)
return clock_duration
|
602b00af689cc25374b7debd39264b438de44baa
| 3,646,135
|
import functools
def account_approved(f):
    """Checks whether the user account has been approved; raises a 401 error
    otherwise.
    """
    @functools.wraps(f)
    def decorator(*args, **kwargs):
        if not current_user:
            abort(401, {'message': 'Invalid user account.'})
        elif not current_user.is_approved:
            abort(401, {'message': 'Account has not yet been approved.'})
        return f(*args, **kwargs)
    return decorator
|
e9f9e7bd15bd1df22540a6a42db95501a26fcce2
| 3,646,136
|
def multiply(x):
"""Multiply operator.
>>> multiply(2)(1)
2
"""
def multiply(y):
return y * x
return multiply
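# A couple of extra usage examples of the curried form:
double = multiply(2)
triple = multiply(3)
assert double(10) == 20
assert triple(7) == 21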
|
77d983090e03820d03777f1f69cfc7b0ef6d88a2
| 3,646,137
|
def tally_transactions(address, txs):
"""Calculate the net value of all deposits, withdrawals and fees
:param address: Address of the account
:param txs: Transactions JSON for the address
:returns: The total net value of all deposits, withdrawals and fees
"""
send_total = 0
for item in txs['result']:
if item['success']:
# Check for deposits/withdrawals
if "MsgSend" in item['messageTypes']:
if item['messages'][0]['content']['toAddress'] != address:
# Remove withdrawals
send_total -= translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount']))
else:
# Add deposits
send_total += translate_basecro_to_cro(Decimal(item['messages'][0]['content']['amount'][0]['amount']))
# Remove fees
send_total -= translate_basecro_to_cro(Decimal(item['fee'][0]['amount']))
return send_total
|
6eaca1e7be11f9af254bcc491ff661413d8745f4
| 3,646,138
|
def expose(policy):
"""
Annotate a method to permit access to contexts matching an authorization
policy. The annotation may be specified multiple times. Methods lacking any
authorization policy are not accessible.
::
@mitogen.service.expose(policy=mitogen.service.AllowParents())
def unsafe_operation(self):
...
:param mitogen.service.Policy policy:
The policy to require.
"""
def wrapper(func):
func.mitogen_service__policies = [policy] + getattr(
func, "mitogen_service__policies", []
)
return func
return wrapper
|
74caed36885e5ea947a2ecdac9a2cddf2f5f51b0
| 3,646,139
|
import numpy as np
import tensorflow as tf
def _bytes_feature(value):
    """Creates a bytes feature from the passed value.
    Args:
        value: A numpy array.
    Returns:
        A TensorFlow feature.
    """
    return tf.train.Feature(
        bytes_list=tf.train.BytesList(
            value=[value.astype(np.float32).tobytes()]))
|
e13ac22bef91af7847aecdb558e849de27e89623
| 3,646,140
|
import anndata
import pandas as pd
from typing import Union
from typing import OrderedDict
def get_cell_phase(
adata: anndata.AnnData,
layer: str = None,
gene_list: Union[OrderedDict, None] = None,
refine: bool = True,
threshold: Union[float, None] = 0.3,
) -> pd.DataFrame:
"""Compute cell cycle phase scores for cells in the population
Arguments
---------
adata: :class:`~anndata.AnnData`
layer: `str` or None (default: `None`)
The layer of data to use for calculating correlation. If None, use adata.X.
gene_list: `OrderedDict` or None (default: `None`)
OrderedDict of marker genes to use for cell cycle phases. If None, the default
list will be used.
refine: `bool` (default: `True`)
whether to refine the gene lists based on how consistent the expression is among
the groups
threshold: `float` or None (default: `0.3`)
threshold on correlation coefficient used to discard genes (expression of each
gene is compared to the bulk expression of the group and any gene with a correlation
coefficient less than this is discarded)
Returns
-------
Cell cycle scores indicating the likelihood a given cell is in a given cell cycle phase
"""
# get list of genes if one is not provided
if gene_list is None:
cell_phase_genes = get_cell_phase_genes(adata, layer, refine=refine, threshold=threshold)
else:
cell_phase_genes = gene_list
adata.uns["cell_phase_genes"] = cell_phase_genes
# score each cell cycle phase and Z-normalize
phase_scores = pd.DataFrame(batch_group_score(adata, layer, cell_phase_genes))
normalized_phase_scores = phase_scores.sub(phase_scores.mean(axis=1), axis=0).div(phase_scores.std(axis=1), axis=0)
normalized_phase_scores_corr = normalized_phase_scores.transpose()
normalized_phase_scores_corr["G1-S"] = [1, 0, 0, 0, 0]
normalized_phase_scores_corr["S"] = [0, 1, 0, 0, 0]
normalized_phase_scores_corr["G2-M"] = [0, 0, 1, 0, 0]
normalized_phase_scores_corr["M"] = [0, 0, 0, 1, 0]
normalized_phase_scores_corr["M-G1"] = [0, 0, 0, 0, 1]
phase_list = ["G1-S", "S", "G2-M", "M", "M-G1"]
    # final scores for each phase are the correlation of the expression profile with the vectors defined above
cell_cycle_scores = normalized_phase_scores_corr.corr()
tmp = -len(phase_list)
cell_cycle_scores = cell_cycle_scores[tmp:].transpose()[: -len(phase_list)]
# pick maximal score as the phase for that cell
cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores.idxmax(axis=1)
cell_cycle_scores["cell_cycle_phase"] = cell_cycle_scores["cell_cycle_phase"].astype("category")
cell_cycle_scores["cell_cycle_phase"].cat.set_categories(phase_list, inplace=True)
def progress_ratio(x, phase_list):
ind = phase_list.index(x["cell_cycle_phase"])
return x[phase_list[(ind - 1) % len(phase_list)]] - x[phase_list[(ind + 1) % len(phase_list)]]
# interpolate position within given cell cycle phase
cell_cycle_scores["cell_cycle_progress"] = cell_cycle_scores.apply(
lambda x: progress_ratio(x, list(phase_list)), axis=1
)
cell_cycle_scores.sort_values(
["cell_cycle_phase", "cell_cycle_progress"],
ascending=[True, False],
inplace=True,
)
# order of cell within cell cycle phase
cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase").cumcount()
cell_cycle_scores["cell_cycle_order"] = cell_cycle_scores.groupby("cell_cycle_phase")["cell_cycle_order"].apply(
lambda x: x / (len(x) - 1)
)
return cell_cycle_scores
|
9b379c42cd409893d51885f5580b26b9700547bf
| 3,646,141
|
import tensorflow as tf
def variational_lower_bound(prediction):
    """
    This is the variational lower bound derived in
    Auto-Encoding Variational Bayes, Kingma & Welling, 2014.
    :param prediction: list of [posterior_means, posterior_logvar,
        data_means, data_logvar, originals], where
posterior_means: predicted means for the posterior
posterior_logvar: predicted log variances for the posterior
data_means: predicted mean parameter
for the voxels modelled as Gaussians
data_logvar: predicted log variance parameter
for the voxels modelled as Gaussians
originals: the original inputs
    :return: the batch mean of (KL divergence - Gaussian log-likelihood), i.e. the negative lower bound to be minimized
"""
# log_2pi = np.log(2*np.pi)
log_2pi = 1.837877
assert len(prediction) >= 5, \
"please see the returns of network/vae.py" \
"for the prediction list format"
posterior_means, posterior_logvar = prediction[:2]
data_means, data_logvar = prediction[2:4]
originals = prediction[4]
squared_diff = tf.square(data_means - originals)
log_likelihood = \
data_logvar + log_2pi + tf.exp(-data_logvar) * squared_diff
# batch_size = tf.shape(log_likelihood)[0]
batch_size = log_likelihood.get_shape().as_list()[0]
log_likelihood = tf.reshape(log_likelihood, shape=[batch_size, -1])
log_likelihood = -0.5 * tf.reduce_sum(log_likelihood, axis=[1])
KL_divergence = 1 + posterior_logvar \
- tf.square(posterior_means) \
- tf.exp(posterior_logvar)
KL_divergence = -0.5 * tf.reduce_sum(KL_divergence, axis=[1])
return tf.reduce_mean(KL_divergence - log_likelihood)
|
bcbc9a660f07fe677f823ee3aeb284817e94601d
| 3,646,142
|
import base64
def didGen(vk, method="dad"):
"""
didGen accepts an EdDSA (Ed25519) key in the form of a byte string and returns a DID.
:param vk: 32 byte verifier/public key from EdDSA (Ed25519) key
:param method: W3C did method string. Defaults to "dad".
:return: W3C DID string
"""
if vk is None:
return None
# convert verkey to jsonable unicode string of base64 url-file safe
vk64u = base64.urlsafe_b64encode(vk).decode("utf-8")
return "did:{0}:{1}".format(method, vk64u)
|
9991491ab486d8960633190e3d3baa9058f0da50
| 3,646,143
|
import pickle
def load_dataset(datapath):
"""Extract class label info """
with open(datapath + "/experiment_dataset.dat", "rb") as f:
data_dict = pickle.load(f)
return data_dict
|
3a0d8ef9c48036879b32ab0e74e52429418297c0
| 3,646,144
|
def deleteupload():
"""Deletes an upload.
An uploads_id is given and that entry is then removed from the uploads table
in the database.
"""
uploads_id = request.args.get('uploads_id')
if not uploads.exists(uploads_id=uploads_id):
return bad_json_response(
'BIG OOPS: Something went wrong deleting the file.'
)
uploads.delete(uploads_id=uploads_id)
return good_json_response('success')
|
df18801b287569f1fa1114fc7059a415b82913d0
| 3,646,145
|
from typing import Dict
from pathlib import Path
import json
def load_json(filename: str) -> Dict:
"""Read JSON file from metadata folder
Args:
filename: Name of metadata file
Returns:
dict: Dictionary of data
"""
filepath = (
Path(__file__).resolve().parent.parent.joinpath("metadata").joinpath(filename)
)
metadata: Dict = json.loads(filepath.read_text())
return metadata
|
37d9f08344cf2a544c12fef58992d781556a9efd
| 3,646,147
|
def get_riemann_sum(f, x, delta_x):
    """
    Returns the Riemann sum of `f` over the sample points `x`
    with spacing `delta_x`.
    Parameters
    ----------
    f : callable
        The function to integrate, applied elementwise to `x`
    x : list
        List of numbers returned by `np.linspace` given a lower
        and upper bound, and the number of intervals
    delta_x : float
        The width of each subinterval
Returns
-------
float
The integral sum
"""
return sum(f(x)*delta_x)
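# Hedged worked example: approximate the integral of x**2 over [0, 1]; the exact
# value is 1/3, and a 1000-point sum lands close to it (endpoint effects aside).
import numpy as np

_x = np.linspace(0, 1, 1000)
_dx = _x[1] - _x[0]
print(get_riemann_sum(lambda v: v ** 2, _x, _dx))  # approximately 0.334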
|
dd80d12581533fa4074411845050f29193a03432
| 3,646,148
|
def MPO_rand(n, bond_dim, phys_dim=2, normalize=True, cyclic=False,
herm=False, dtype=float, **mpo_opts):
"""Generate a random matrix product state.
Parameters
----------
n : int
The number of sites.
bond_dim : int
The bond dimension.
phys_dim : int, optional
The physical (site) dimensions, defaults to 2.
normalize : bool, optional
Whether to normalize the operator such that ``trace(A.H @ A) == 1``.
cyclic : bool, optional
Generate a MPO with periodic boundary conditions or not, default is
open boundary conditions.
dtype : {float, complex} or numpy dtype, optional
Data type of the tensor network.
herm : bool, optional
Whether to make the matrix hermitian (or symmetric if real) or not.
mpo_opts
Supplied to :class:`~quimb.tensor.tensor_1d.MatrixProductOperator`.
"""
cyc_shp = (bond_dim,) if cyclic else ()
shapes = [(*cyc_shp, bond_dim, phys_dim, phys_dim),
*((bond_dim, bond_dim, phys_dim, phys_dim),) * (n - 2),
(bond_dim, *cyc_shp, phys_dim, phys_dim)]
def gen_data(shape):
data = randn(shape, dtype=dtype)
if not herm:
return data
trans = (0, 2, 1) if len(shape) == 3 else (0, 1, 3, 2)
return data + data.transpose(*trans).conj()
arrays = map(lambda x: x / norm_fro_dense(x)**(1 / (x.ndim - 1)),
map(gen_data, shapes))
rmpo = MatrixProductOperator(arrays, **mpo_opts)
if normalize:
rmpo /= (rmpo.H @ rmpo)**0.5
return rmpo
|
22220095b5cfcb3625edf3cde59e03fa37cd5423
| 3,646,149
|
def get_short_size(size_bytes):
"""
Get a file size string in short format.
This function returns:
"B" size (e.g. 2) when size_bytes < 1KiB
"KiB" size (e.g. 345.6K) when size_bytes >= 1KiB and size_bytes < 1MiB
"MiB" size (e.g. 7.8M) when size_bytes >= 1MiB
size_bytes: File size in bytes
"""
if size_bytes < 1024:
return str(size_bytes)
if size_bytes < 1048576:
return f"{size_bytes / 1024:.1f}K"
return f"{size_bytes / 1048576:.1f}M"
|
ebc9ba25c01dedf0d15b9e2a21b67989763bc8c8
| 3,646,150
|
def score_from_srl(srl_path, truth_path, freq, verbose=False):
"""
Given source list output by PyBDSF and training truth catalogue,
calculate the official score for the sources identified in the srl.
Args:
srl_path (`str`): Path to source list (.srl file)
truth_path (`str`): Path to training truth catalogue
freq (`int`): Image frequency band (560, 1400 or 9200 MHz)
verbose (`bool`): True to print out size ratio info
"""
truth_df = load_truth_df(truth_path)
# Predict size ID and correct the Maj and Min values:
cat_df = cat_df_from_srl(srl_path)
scorer = Sdc1Scorer(cat_df, truth_df, freq)
score = scorer.run(train=True, detail=True, mode=1)
return score
|
87cfdd7ed7c1a42fc3a4080289e7e34be6a2a85a
| 3,646,152
|
def get_feature_read(key, max_num_bbs=None):
"""Choose the right feature function for the given key to parse TFRecords
Args:
key: the feature name
        max_num_bbs: Max number of bounding boxes (used for `bounding_boxes` and `classes`)
"""
if key in ['im_id', 'num_boxes']:
return tf.FixedLenFeature((), tf.int64)
elif key in ['bounding_boxes']:
assert max_num_bbs is not None
return tf.FixedLenFeature((max_num_bbs, 4), tf.float32)
elif key in ['classes']:
assert max_num_bbs is not None
return tf.FixedLenFeature((max_num_bbs,), tf.int64)
else:
raise SystemExit("Unknown feature", key)
|
6ec7f06e900baedec19950c0f4742da9c4df1514
| 3,646,153
|
import numpy
import time
def kernel(cc, eris, t1=None, t2=None, max_cycle=50, tol=1e-8, tolnormt=1e-6,
verbose=logger.INFO):
"""Exactly the same as pyscf.cc.ccsd.kernel, which calls a
*local* energy() function."""
if isinstance(verbose, logger.Logger):
log = verbose
else:
log = logger.Logger(cc.stdout, verbose)
if t1 is None and t2 is None:
t1, t2 = cc.init_amps(eris)[1:]
elif t1 is None:
nocc = cc.nocc
nvir = cc.nmo - nocc
t1 = numpy.zeros((nocc,nvir), eris.dtype)
elif t2 is None:
t2 = cc.init_amps(eris)[2]
    cput1 = cput0 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
nocc, nvir = t1.shape
eold = 0
eccsd = 0
if cc.diis:
adiis = lib.diis.DIIS(cc, cc.diis_file)
adiis.space = cc.diis_space
else:
adiis = lambda t1,t2,*args: (t1,t2)
conv = False
for istep in range(max_cycle):
t1new, t2new = cc.update_amps(t1, t2, eris)
normt = numpy.linalg.norm(t1new-t1) + numpy.linalg.norm(t2new-t2)
t1, t2 = t1new, t2new
t1new = t2new = None
if cc.diis:
t1, t2 = cc.diis(t1, t2, istep, normt, eccsd-eold, adiis)
eold, eccsd = eccsd, energy(cc, t1, t2, eris)
log.info('istep = %d E(CCSD) = %.15g dE = %.9g norm(t1,t2) = %.6g',
istep, eccsd, eccsd - eold, normt)
cput1 = log.timer('CCSD iter', *cput1)
if abs(eccsd-eold) < tol and normt < tolnormt:
conv = True
break
log.timer('CCSD', *cput0)
return conv, eccsd, t1, t2
|
24917c48cbdb2062914462ad3f354cdd0e4e6318
| 3,646,154
|
async def getAllDestinyIDs():
"""Returns a list with all discord members destiny ids"""
select_sql = """
SELECT
destinyID
FROM
"discordGuardiansToken";"""
async with (await get_connection_pool()).acquire(timeout=timeout) as connection:
result = await connection.fetch(select_sql)
return [x[0] for x in result]
|
22e94165cc0f50c8458be152d77231f30f7383b8
| 3,646,156
|
def login():
"""Handles login for Gello."""
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
|
5000c52d652c114a5b69b403fca9809dfa9e6bca
| 3,646,157
|
def create_loss_functions(interconnector_coefficients, demand_coefficients, demand):
"""Creates a loss function for each interconnector.
    Transforms the dynamic demand dependent interconnector loss functions into functions that only depend on
    interconnector flow, i.e. takes the function f and creates g by pre-calculating the demand dependent terms.
f(inter_flow, flow_coefficient, nsw_demand, nsw_coefficient, qld_demand, qld_coefficient) = inter_losses
becomes
g(inter_flow) = inter_losses
The mathematics of the demand dependent loss functions is described in the
:download:`Marginal Loss Factors documentation section 3 to 5 <../../docs/pdfs/Marginal Loss Factors for the 2020-21 Financial year.pdf>`.
Examples
--------
>>> import pandas as pd
Some arbitrary regional demands.
>>> demand = pd.DataFrame({
... 'region': ['VIC1', 'NSW1', 'QLD1', 'SA1'],
... 'loss_function_demand': [6000.0 , 7000.0, 5000.0, 3000.0]})
Loss model details from 2020 Jan NEM web LOSSFACTORMODEL file
>>> demand_coefficients = pd.DataFrame({
... 'interconnector': ['NSW1-QLD1', 'NSW1-QLD1', 'VIC1-NSW1', 'VIC1-NSW1', 'VIC1-NSW1'],
... 'region': ['NSW1', 'QLD1', 'NSW1', 'VIC1', 'SA1'],
... 'demand_coefficient': [-0.00000035146, 0.000010044, 0.000021734, -0.000031523, -0.000065967]})
Loss model details from 2020 Jan NEM web INTERCONNECTORCONSTRAINT file
>>> interconnector_coefficients = pd.DataFrame({
... 'interconnector': ['NSW1-QLD1', 'VIC1-NSW1'],
... 'loss_constant': [0.9529, 1.0657],
... 'flow_coefficient': [0.00019617, 0.00017027],
... 'from_region_loss_share': [0.5, 0.5]})
Create the loss functions
>>> loss_functions = create_loss_functions(interconnector_coefficients, demand_coefficients, demand)
Lets use one of the loss functions, first get the loss function of VIC1-NSW1 and call it g
>>> g = loss_functions[loss_functions['interconnector'] == 'VIC1-NSW1']['loss_function'].iloc[0]
Calculate the losses at 600 MW flow
>>> print(g(600.0))
-70.87199999999996
Now for NSW1-QLD1
>>> h = loss_functions[loss_functions['interconnector'] == 'NSW1-QLD1']['loss_function'].iloc[0]
>>> print(h(600.0))
35.70646799999993
Parameters
----------
interconnector_coefficients : pd.DataFrame
====================== ========================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
loss_constant the constant term in the interconnector loss factor equation (as np.float64)
flow_coefficient the coefficient of the interconnector flow variable in the loss factor equation
(as np.float64)
        from_region_loss_share the proportion of losses attributed to the from region; the remainder is attributed to the to
region (as np.float64)
====================== ========================================================================================
demand_coefficients : pd.DataFrame
================== =========================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
        region             the market region whose demand the coefficient applies to, required (as `str`)
demand_coefficient the coefficient of regional demand variable in the loss factor equation (as `np.float64`)
================== =========================================================================================
demand : pd.DataFrame
==================== =====================================================================================
Columns: Description:
region unique identifier of a region (as `str`)
loss_function_demand the estimated regional demand, as calculated by initial supply + demand forecast,
in MW (as `np.float64`)
==================== =====================================================================================
Returns
-------
pd.DataFrame
loss_functions
================ ============================================================================================
Columns: Description:
interconnector unique identifier of a interconnector (as `str`)
loss_function a `function` object that takes interconnector flow (as `float`) an input and returns
interconnector losses (as `float`).
================ ============================================================================================
"""
demand_loss_factor_offset = pd.merge(demand_coefficients, demand, 'inner', on=['region'])
demand_loss_factor_offset['offset'] = demand_loss_factor_offset['loss_function_demand'] * \
demand_loss_factor_offset['demand_coefficient']
demand_loss_factor_offset = demand_loss_factor_offset.groupby('interconnector', as_index=False)['offset'].sum()
loss_functions = pd.merge(interconnector_coefficients, demand_loss_factor_offset, 'left', on=['interconnector'])
loss_functions['loss_constant'] = loss_functions['loss_constant'] + loss_functions['offset'].fillna(0)
loss_functions['loss_function'] = \
loss_functions.apply(lambda x: create_function(x['loss_constant'], x['flow_coefficient']), axis=1)
return loss_functions.loc[:, ['interconnector', 'loss_function', 'from_region_loss_share']]
|
1522b4506d4dad40e2b3d16bdd8ebd92d9b46401
| 3,646,158
|
def num2proto(pnum):
"""Protocol number to name"""
# Look for the common ones first
if pnum == 6:
return "tcp"
elif pnum == 17:
return "udp"
elif pnum == 1:
return "icmp"
elif pnum == 58:
# Use the short form of icmp-ipv6 when appropriate
return "icmpv6"
# Get cached proto table, else create new one
global proto_table
if not bool(proto_table):
proto_table = ProtocolTable()
pname = proto_table[pnum]
# If not found, return the number as a string
if pname == "Unassigned":
return str(pnum)
return pname
|
ad68b0fe530d63de62087eb23e5cacca0d48b996
| 3,646,159
|
def get_problem_size(problem_size, params):
"""compute current problem size"""
if callable(problem_size):
problem_size = problem_size(params)
if isinstance(problem_size, (str, int, np.integer)):
problem_size = (problem_size, )
current_problem_size = [1, 1, 1]
for i, s in enumerate(problem_size):
if isinstance(s, str):
current_problem_size[i] = int(
eval(replace_param_occurrences(s, params)))
elif isinstance(s, (int, np.integer)):
current_problem_size[i] = s
else:
raise TypeError(
"Error: problem_size should only contain strings or integers")
return current_problem_size
|
c71394f081a7f0d00fcae653dffc439bc7b1b3b1
| 3,646,160
|
def interpolate(x,
size=None,
scale_factor=None,
mode='nearest',
align_corners=False,
align_mode=0,
data_format='NCHW',
name=None):
"""
This op resizes a batch of images.
The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
or 4-D (num_batches, channels, in_h, in_w), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    where in_w is the width of the input tensor, in_h is the height of the input tensor,
    and in_d is the depth of the input tensor;
    the resizing only applies to the three spatial dimensions (depth, height and width).
Supporting resample methods:
'linear' : Linear interpolation
'bilinear' : Bilinear interpolation
'trilinear' : Trilinear interpolation
'nearest' : Nearest neighbor interpolation
'bicubic' : Bicubic interpolation
'area': Area interpolation
Linear interpolation is the method of using a line connecting two known quantities
to determine the value of an unknown quantity between the two known quantities.
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters,the calculation method
of interpolation can be selected by them.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Area interpolation is to perform area interpolation
in both the 3rd dimension(in height direction) , the 4th dimension(in width
direction) and the 5th dimension(in depth direction) on input tensor. Set to
area will directly call `paddle.nn.functional.adaptive_avg_pool1d` or
`paddle.nn.functional.adaptive_avg_pool2d` or `paddle.nn.functional.adaptive_avg_pool3d`.
Example:
.. code-block:: text
For scale_factor:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Linear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,W_in)
output: (N,C,W_out) where:
W_out = W_{in} * scale_{factor}
Nearest neighbor interpolation:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of linear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Linear_interpolation.
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
x (Tensor): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
size (list|tuple|Tensor|None): Output shape of image resize
layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w)
when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor.
Default: None. If a list/tuple, each element can be an integer or a Tensor of shape: [1].
If a Tensor, its dimensions size should be a 1.
scale_factor (float|Tensor|list|tuple|None): The multiplier for the input height or width. At
least one of :attr:`size` or :attr:`scale_factor` must be set.
             And :attr:`size` has a higher priority than :attr:`scale_factor`. It has to match the input size if it is either a list, a tuple or a Tensor.
Default: None.
mode (str): The resample method. It supports 'linear', 'area', 'nearest', 'bilinear',
'bicubic' and 'trilinear' currently. Default: 'nearest'
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.This only has an effect when 'linear', 'bilinear', 'bicubic' or 'trilinear'.
Default: False
align_mode(int) : An optional for linear/bilinear/trilinear interpolation. Refer to the formula in the example above,
it can be \'0\' for src_idx = scale_factor*(dst_indx+0.5)-0.5 , can be \'1\' for
src_idx = scale_factor*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: size should be a list or tuple or Tensor.
ValueError: The 'mode' of image_resize can only be 'linear', 'bilinear',
'trilinear', 'bicubic', 'area' or 'nearest' currently.
ValueError: 'linear' only support 3-D tensor.
ValueError: 'bilinear' and 'bicubic' only support 4-D tensor.
ValueError: 'nearest' only support 4-D or 5-D tensor.
ValueError: 'trilinear' only support 5-D tensor.
ValueError: One of size and scale_factor must not be None.
ValueError: size length should be 1 for input 3-D tensor.
ValueError: size length should be 2 for input 4-D tensor.
ValueError: size length should be 3 for input 5-D tensor.
ValueError: scale_factor should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.nn.functional as F
# given out size
input_data = np.random.rand(2,3,6,10).astype("float32")
x = paddle.to_tensor(input_data)
output_1 = F.interpolate(x=x, size=[12,12])
print(output_1.shape)
# [2L, 3L, 12L, 12L]
# given scale
output_2 = F.interpolate(x=x, scale_factor=[2,1])
print(output_2.shape)
# [2L, 3L, 12L, 10L]
# bilinear interp
output_3 = F.interpolate(x=x, scale_factor=[2,1], mode="bilinear")
            print(output_3.shape)
# [2L, 3L, 12L, 10L]
"""
data_format = data_format.upper()
resample = mode.upper()
resample_type = mode.lower()
resample_methods = [
'LINEAR',
'BILINEAR',
'TRILINEAR',
'NEAREST',
'BICUBIC',
'AREA',
]
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'area', 'linear', 'bilinear', 'trilinear', "
" 'bicubic' or 'nearest' currently.")
if resample in ['LINEAR'] and len(x.shape) != 3:
raise ValueError("'linear' only support 3-D tensor.")
if resample in ['NEAREST'] and len(x.shape) != 4 and len(x.shape) != 5:
raise ValueError("'NEAREST' only support 4-D or 5-D tensor.")
if resample in ['BILINEAR', 'BICUBIC'] and len(x.shape) != 4:
raise ValueError("'bilinear' and 'bicubic' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(x.shape) != 5:
raise ValueError("'trilinear'only support 5-D tensor.")
if size is None and scale_factor is None:
raise ValueError("One of size and scale_factor must not be None.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if align_corners != 0 and resample == 'NEAREST':
raise ValueError(
"align_corners option can only be set with the interpolating modes: linear | bilinear | bicubic | trilinear"
)
if resample == 'AREA':
if isinstance(size, list) or isinstance(size, tuple) or isinstance(
size, Variable):
if len(size) == 0:
raise ValueError("output size can not be empty")
if len(x.shape) == 3:
return paddle.nn.functional.adaptive_avg_pool1d(x, size)
elif len(x.shape) == 4:
return paddle.nn.functional.adaptive_avg_pool2d(x, size)
elif len(x.shape) == 5:
return paddle.nn.functional.adaptive_avg_pool3d(x, size)
helper = LayerHelper('{}_interp_v2'.format(resample_type), **locals())
dtype = helper.input_dtype(input_param_name='x')
if len(x.shape) == 3 and data_format not in ['NCW', 'NWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCW` or `NWC` supported for 3-D input.")
elif len(x.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(x.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
    def _is_list_or_tuple_(data):
        return isinstance(data, (list, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
data_layout = 'NHWC'
if resample == 'NEAREST':
align_corners = False
inputs = {"X": x}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
out_shape = size
scale = scale_factor
if out_shape is not None and scale is not None:
raise ValueError("Only one of size or scale_factor should be defined.")
if out_shape is not None:
if isinstance(out_shape, Variable) and not in_dynamic_mode():
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if in_dynamic_mode():
if isinstance(out_shape, Variable):
out_shape = list(out_shape.numpy())
for i, dim in enumerate(out_shape):
if isinstance(dim, Variable):
out_shape[i] = dim.numpy()[0]
            if not (_is_list_or_tuple_(out_shape)):
raise TypeError("size should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(x.shape) == 3:
if len(out_shape) != 1:
raise ValueError(
"size length should be 2 for input 3-D tensor")
if contain_var:
attrs['out_w'] = size_list[0]
else:
out_shape = list(map(int, out_shape))
attrs['out_w'] = out_shape[0]
if len(x.shape) == 4:
if len(out_shape) != 2:
raise ValueError("size length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(x.shape) == 5:
if len(out_shape) != 3:
raise ValueError("size length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if in_dynamic_mode() and isinstance(scale, Variable):
scale = list(scale.numpy())
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
scale_list = []
for i in range(len(x.shape) - 2):
scale_list.append(scale)
attrs['scale'] = list(map(float, scale_list))
elif isinstance(scale, list) or isinstance(scale, tuple):
if len(scale) != len(x.shape) - 2:
raise ValueError("scale_shape length should be {} for "
"input {}-D tensor.".format(
len(x.shape) - 2, len(x.shape)))
for value in scale:
if value <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = list(map(float, scale))
else:
raise TypeError(
"Attr(scale)'s type should be float, int, list, tuple, or Tensor."
)
if in_dynamic_mode():
attr_list = []
for k, v in attrs.items():
attr_list.append(k)
attr_list.append(v)
dy_attr = tuple(attr_list)
if resample_type == "linear":
out = _C_ops.linear_interp_v2(x, *dy_attr)
elif resample_type == "bilinear":
out = _C_ops.bilinear_interp_v2(x, *dy_attr)
elif resample_type == "trilinear":
out = _C_ops.trilinear_interp_v2(x, *dy_attr)
elif resample_type == "nearest":
out = _C_ops.nearest_interp_v2(x, *dy_attr)
elif resample_type == "bicubic":
out = _C_ops.bicubic_interp_v2(x, *dy_attr)
return out
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp_v2'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
|
a9e03ecc89cdb623922984d3cce9b6cb114419b9
| 3,646,162
|
import datetime
import jwt
def encode_auth_token(user_id):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=90),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
|
5239b2d85e3c4c1f4c6f74297118295c0bf7d532
| 3,646,163
|
from typing import List
from typing import Tuple
from typing import Dict
def build_docs_for_packages(
current_packages: List[str],
docs_only: bool,
spellcheck_only: bool,
for_production: bool,
jobs: int,
verbose: bool,
) -> Tuple[Dict[str, List[DocBuildError]], Dict[str, List[SpellingError]]]:
"""Builds documentation for all packages and combines errors."""
all_build_errors: Dict[str, List[DocBuildError]] = defaultdict(list)
all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list)
with with_group("Cleaning documentation files"):
for package_name in current_packages:
console.print(f"[info]{package_name:60}:[/] Cleaning files")
builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production)
builder.clean_files()
if jobs > 1:
run_in_parallel(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
jobs,
spellcheck_only,
verbose,
)
else:
run_sequentially(
all_build_errors,
all_spelling_errors,
current_packages,
docs_only,
for_production,
spellcheck_only,
verbose,
)
return all_build_errors, all_spelling_errors
|
c4196c139fda703c90c60507156cce8cb29da98e
| 3,646,164
|
def _inspect_output_dirs_test(ctx):
"""Test verifying output directories used by a test."""
env = analysistest.begin(ctx)
# Assert that the output bin dir observed by the aspect added by analysistest
# is the same as those observed by the rule directly, even when that's
# under a config transition and therefore not the same as the bin dir
# used by the test rule.
bin_path = analysistest.target_bin_dir_path(env)
target_under_test = analysistest.target_under_test(env)
asserts.false(env, not bin_path, "bin dir path not found.")
asserts.false(
env,
bin_path == ctx.bin_dir.path,
"bin dir path expected to differ between test and target_under_test.",
)
asserts.equals(env, bin_path, target_under_test[_OutputDirInfo].bin_path)
return analysistest.end(env)
|
de14b2d4514792d4b2427ff3ef4fae6e7af8e31d
| 3,646,165
|
import numpy as np
import ray
def wait(object_refs, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_refs (List[ObjectRef], Tuple(ObjectRef), np.array(ObjectRef)):
List like of object refs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
"""
if isinstance(object_refs, (tuple, np.ndarray)):
return ray.wait(
list(object_refs), num_returns=num_returns, timeout=timeout)
return ray.wait(object_refs, num_returns=num_returns, timeout=timeout)
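# Hedged usage sketch (requires a running Ray instance; ray.init, ray.put and
# ray.wait are standard Ray APIs). It shows the wrapper converting a numpy array
# of object refs to a list before delegating to ray.wait.
import numpy as np
import ray

ray.init(ignore_reinit_error=True)
_refs = np.array([ray.put(i) for i in range(4)])
_ready, _not_ready = wait(_refs, num_returns=2)
print(len(_ready), len(_not_ready))  # 2 2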
|
e56ffd1700715049cc899d27735bb98da47fa2b6
| 3,646,166
|
def train(
data,
feature_names,
tagset,
epochs,
optimizer,
score_func=perceptron_score,
step_size=1,
):
"""
Trains the model on the data and returns the parameters
:param data: Array of dictionaries representing the data. One dictionary for each data point (as created by the
make_data_point function).
:param feature_names: Array of Strings. The list of feature names.
:param tagset: Array of Strings. The list of tags.
    :param epochs: Int. The number of epochs to train
    :param optimizer: Callable. The optimization routine used to update the parameters.
    :param score_func: Callable. Scoring function used when decoding (defaults to perceptron_score).
    :param step_size: Number. The learning-rate / step size passed to the optimizer.
    :return: FeatureVector. The learned parameters.
"""
parameters = FeatureVector({}) # creates a zero vector
gradient = get_gradient(
data, feature_names, tagset, parameters, score_func
)
def training_observer(epoch, parameters):
"""
Evaluates the parameters on the development data, and writes out the parameters to a 'model.iter'+epoch and
the predictions to 'ner.dev.out'+epoch.
:param epoch: int. The epoch
:param parameters: Feature Vector. The current parameters
:return: Double. F1 on the development data
"""
(_, _, f1) = evaluate(
dev_data, parameters, feature_names, tagset, score_func
)
return f1
# return the final parameters
return optimizer(
sample_num,
epochs,
gradient,
parameters,
training_observer,
step_size=step_size,
)
|
51a0a506deecf56067ef185848d7f706c9da0d3e
| 3,646,167
|
def summed_timeseries(timeseries):
"""
    Sum the value columns for each timestamp of a timeseries that carries several values per timestamp.
    :param timeseries: list of rows of the form [timestamp, value1, value2, ...]
    :return: list of [timestamp, formatted_sum] pairs (rows with no values are skipped)
"""
sum_timeseries = []
for i in range(len(timeseries)):
if len(timeseries[i])>1:
sum_timeseries.append([timeseries[i][0], '%.3f' % (sum(timeseries[i][1:]))])
return sum_timeseries
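# Example: two samples, each carrying three value columns after the timestamp.
_series = [
    [1600000000, 1.0, 2.0, 3.0],
    [1600000060, 0.5, 0.25, 0.25],
]
print(summed_timeseries(_series))
# [[1600000000, '6.000'], [1600000060, '1.000']]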
|
618505f8f0960900a993bb6d9196d17bf31d02a6
| 3,646,168
|
import pathlib
def path_check(path_to_check):
"""
    Check that the path given as a parameter is a valid absolute path.
    :param path_to_check: the path string to check
:type path_to_check: str
:return: True if it is a valid absolute path, False otherwise
:rtype: boolean
"""
path = pathlib.Path(path_to_check)
if not path.is_absolute():
return False
return True
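# Quick cross-platform examples (os.path.abspath yields an absolute path on any OS):
import os

assert path_check(os.path.abspath("somefile.txt")) is True
assert path_check("relative/path.txt") is False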
|
41b3537b0be2c729ba993a49863df4a15119db8b
| 3,646,169
|
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Dict
from typing import Any
from datetime import datetime
from typing import List
import boto3
def _path2list(
path: Union[str, Sequence[str]],
boto3_session: boto3.Session,
s3_additional_kwargs: Optional[Dict[str, Any]],
    last_modified_begin: Optional[datetime] = None,
    last_modified_end: Optional[datetime] = None,
suffix: Union[str, List[str], None] = None,
ignore_suffix: Union[str, List[str], None] = None,
ignore_empty: bool = False,
) -> List[str]:
"""Convert Amazon S3 path to list of objects."""
_suffix: Optional[List[str]] = [suffix] if isinstance(suffix, str) else suffix
_ignore_suffix: Optional[List[str]] = [ignore_suffix] if isinstance(ignore_suffix, str) else ignore_suffix
if isinstance(path, str): # prefix
paths: List[str] = list_objects(
path=path,
suffix=_suffix,
ignore_suffix=_ignore_suffix,
boto3_session=boto3_session,
last_modified_begin=last_modified_begin,
last_modified_end=last_modified_end,
ignore_empty=ignore_empty,
s3_additional_kwargs=s3_additional_kwargs,
)
elif isinstance(path, list):
if last_modified_begin or last_modified_end:
raise exceptions.InvalidArgumentCombination(
"Specify a list of files or (last_modified_begin and last_modified_end)"
)
paths = path if _suffix is None else [x for x in path if x.endswith(tuple(_suffix))]
paths = path if _ignore_suffix is None else [x for x in paths if x.endswith(tuple(_ignore_suffix)) is False]
else:
raise exceptions.InvalidArgumentType(f"{type(path)} is not a valid path type. Please, use str or List[str].")
return paths
|
542d41ce29f71e3209d702eab157a75fa40650c0
| 3,646,170
|
import torch
from random import uniform
def e_greedy_normal_noise(mags, e):
"""Epsilon-greedy noise
If e>0 then with probability(adding noise) = e, multiply mags by a normally-distributed
noise.
:param mags: input magnitude tensor
:param e: epsilon (real scalar s.t. 0 <= e <=1)
:return: noise-multiplier.
"""
if e and uniform(0, 1) <= e:
# msglogger.info("%sRankedStructureParameterPruner - param: %s - randomly choosing channels",
# threshold_type, param_name)
return torch.randn_like(mags)
return 1
|
e2e9f8f49e7d3e6b2319aaa6a869f24aa3047946
| 3,646,171
|
import numpy as np
def beam_area(*args):
"""
Calculate the Gaussian beam area.
Parameters
----------
args: float
FWHM of the beam.
If args is a single argument, a symmetrical beam is assumed.
If args has two arguments, the two arguments are bmaj and bmin,
the width of the major and minor axes of the beam in that order.
Return
------
out: float
Beam area. No unit conversion is performed, i.e. the unit will depend
        on the input arguments. For example, beam width in degree will return
the beam area in square degree. Likewise, beam width in pixel will
return the beam area in pixel.
"""
if len(args) > 2:
raise ValueError('Input argument must be a single beam width for a '
'symmetrical beam, or widths of the major and minor '
'axes of the beam.')
if len(args) == 2:
bmaj, bmin = args
else:
bmaj = args[0]
bmin = bmaj
return np.pi * bmaj * bmin / (4 * np.log(2))
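# Worked example: a symmetric 10 arcsec beam has an area of
# pi * 10 * 10 / (4 ln 2) ~= 113.3 square arcsec; a 10 x 5 arcsec beam has half that.
print(round(beam_area(10.0), 1))       # 113.3
print(round(beam_area(10.0, 5.0), 1))  # 56.7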
|
93c4616018a098199a47fb25038cb88707444864
| 3,646,172
|
def get_settlement_amounts(
participant1,
participant2
):
""" Settlement algorithm
Calculates the token amounts to be transferred to the channel participants when
a channel is settled.
!!! Don't change this unless you really know what you are doing.
"""
total_available_deposit = (
participant1.deposit +
participant2.deposit -
participant1.withdrawn -
participant2.withdrawn
)
participant1_amount = (
participant1.deposit +
participant2.transferred -
participant1.withdrawn -
participant1.transferred
)
participant1_amount = max(participant1_amount, 0)
participant1_amount = min(participant1_amount, total_available_deposit)
participant2_amount = total_available_deposit - participant1_amount
participant1_locked = min(participant1_amount, participant1.locked)
participant2_locked = min(participant2_amount, participant2.locked)
participant1_amount = max(participant1_amount - participant1.locked, 0)
participant2_amount = max(participant2_amount - participant2.locked, 0)
assert total_available_deposit == (
participant1_amount +
participant2_amount +
participant1_locked +
participant2_locked
)
return SettlementValues(
participant1_balance=participant1_amount,
participant2_balance=participant2_amount,
participant1_locked=participant1_locked,
participant2_locked=participant2_locked,
)
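# Hedged worked example with simple stand-ins for the participant records and the
# SettlementValues result type (the real types come from the surrounding module):
from collections import namedtuple

_Participant = namedtuple("Participant", "deposit withdrawn transferred locked")
SettlementValues = namedtuple(
    "SettlementValues",
    "participant1_balance participant2_balance participant1_locked participant2_locked",
)

_p1 = _Participant(deposit=100, withdrawn=10, transferred=30, locked=5)
_p2 = _Participant(deposit=50, withdrawn=0, transferred=20, locked=0)
print(get_settlement_amounts(_p1, _p2))
# SettlementValues(participant1_balance=75, participant2_balance=60,
#                  participant1_locked=5, participant2_locked=0)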
|
e4bddfccbced0235b1d5265519208cef5167013d
| 3,646,173
|
import time
def timefunc(f):
"""Simple timer function to identify slow spots in algorithm.
Just import function and put decorator @timefunc on top of definition of any
function that you want to time.
"""
def f_timer(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
end = time.time()
print(f.__name__, 'took {:.2f} seconds'.format(end - start))
return result, (end - start)
return f_timer
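# Hedged usage sketch; note the wrapped call returns (result, elapsed_seconds).
@timefunc
def _slow_sum(n):
    return sum(range(n))

_result, _elapsed = _slow_sum(1_000_000)
print(_result, _elapsed)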
|
56d5d052fa559e1b7c797ed00ee1b82c8e2126d6
| 3,646,174
|
def rdr_geobox(rdr) -> GeoBox:
""" Construct GeoBox from opened dataset reader.
"""
h, w = rdr.shape
return GeoBox(w, h, rdr.transform, rdr.crs)
|
0c22ff869faa2988e63a4b13d17c8f5ba7343ffc
| 3,646,175
|
def sequence(lst: Block[Result[_TSource, _TError]]) -> Result[Block[_TSource], _TError]:
"""Execute a sequence of result returning commands and collect the
sequence of their response."""
return traverse(identity, lst)
|
d228237edc95a4d2c4ef1c9591af41a639c42a6d
| 3,646,176
|
def keyword_dct_from_block(block, formatvals=True):
""" Take a section with keywords defined and build
a dictionary for the keywords
assumes a block that is a list of key-val pairs
"""
key_dct = None
if block is not None:
block = ioformat.remove_whitespace(block)
key_val_blocks = keyword_value_blocks(block)
if key_val_blocks is not None:
key_dct = {}
for key, val in key_val_blocks:
if formatvals:
formtd_key, formtd_val = format_keyword_values(key, val)
key_dct[formtd_key] = formtd_val
else:
key_dct[key] = val
return key_dct
|
929defe8d07fff4bf50f1167c0749a6e19d9ecb2
| 3,646,177
|
def get_geocode(args):
"""
Returns GPS coordinates from Google Maps for a given location.
"""
result = Geocoder.geocode(args.address)
lat, lon = result[0].coordinates
lat = round(lat, 6)
lon = round(lon, 6)
return (lat, lon)
|
1ef5d89a1157bbbe381ac1e4500e198735b71898
| 3,646,178
|
import numpy as np
from sklearn.linear_model import LinearRegression
def mice(data, **kwargs):
"""Multivariate Imputation by Chained Equations
Reference:
Buuren, S. V., & Groothuis-Oudshoorn, K. (2011). Mice: Multivariate
Imputation by Chained Equations in R. Journal of Statistical Software,
45(3). doi:10.18637/jss.v045.i03
Implementation follows the main idea from the paper above. Differs in
decision of which variable to regress on (here, I choose it at random).
Also differs in stopping criterion (here the model stops after change in
prediction from previous prediction is less than 10%).
Parameters
----------
data: numpy.ndarray
Data to impute.
Returns
-------
numpy.ndarray
Imputed data.
"""
null_xy = find_null(data)
# Add a column of zeros to the index values
null_xyv = np.append(null_xy, np.zeros((np.shape(null_xy)[0], 1)), axis=1)
null_xyv = [[int(x), int(y), v] for x, y, v in null_xyv]
temp = []
cols_missing = set([y for _, y, _ in null_xyv])
# Step 1: Simple Imputation, these are just placeholders
for x_i, y_i, value in null_xyv:
# Column containing nan value without the nan value
col = data[:, [y_i]][~np.isnan(data[:, [y_i]])]
new_value = np.mean(col)
data[x_i][y_i] = new_value
temp.append([x_i, y_i, new_value])
null_xyv = temp
    # Step 5: Repeat steps 2 - 4 until every imputed value has converged
    converged = [False] * len(null_xyv)
    while not all(converged):
# Step 2: Placeholders are set back to missing for one variable/column
dependent_col = int(np.random.choice(list(cols_missing)))
missing_xs = [int(x) for x, y, value in null_xyv if y == dependent_col]
# Step 3: Perform linear regression using the other variables
x_train, y_train = [], []
for x_i in (x_i for x_i in range(len(data)) if x_i not in missing_xs):
x_train.append(np.delete(data[x_i], dependent_col))
y_train.append(data[x_i][dependent_col])
model = LinearRegression()
model.fit(x_train, y_train)
# Step 4: Missing values for the missing variable/column are replaced
# with predictions from our new linear regression model
temp = []
# For null indices with the dependent column that was randomly chosen
        for i, (x_i, y_i, value) in enumerate(null_xyv):
            if y_i == dependent_col:
                # Row 'x' without the nan value; sklearn expects a 2-D input
                new_value = model.predict(
                    np.delete(data[x_i], dependent_col).reshape(1, -1))[0]
                data[x_i][y_i] = new_value
                temp.append([x_i, y_i, new_value])
                delta = (new_value - value) / value
                if abs(delta) < 0.1:
                    converged[i] = True
null_xyv = temp
return data
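A minimal usage sketch follows; it is hypothetical and assumes a `find_null` helper (not shown in this snippet) that returns the row/column indices of missing entries, with the example living in the same module as mice:

import numpy as np

def find_null(data):
    # assumed helper: (n, 2) array of [row, col] indices of NaN entries
    return np.argwhere(np.isnan(data))

data = np.array([[1.0, 2.0, 3.0],
                 [2.0, np.nan, 6.0],
                 [3.0, 6.0, np.nan],
                 [4.0, 8.0, 12.0]])
imputed = mice(data)  # NaNs replaced by chained-equations estimates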
|
e2046350d071a1cc17bc23077c0be2d0939371b5
| 3,646,179
|
from functools import reduce
def get_next_code(seen, server_ticket=0):
"""Find next unused assertion code.
Called by: SConstruct and main()
Since SConstruct calls us, codes[] must be global OR WE REPARSE EVERYTHING
"""
if not codes:
(_, _, seen) = read_error_codes()
if server_ticket:
# Each SERVER ticket is allocated 100 error codes ranging from TICKET_00 -> TICKET_99.
def generator(seen, ticket):
avail_codes = list(range(ticket * 100, (ticket + 1) * 100))
avail_codes.reverse()
while avail_codes:
code = avail_codes.pop()
if str(code) in seen:
continue
yield code
return "No more available codes for ticket. Ticket: {}".format(ticket)
return generator(seen, server_ticket)
# No server ticket. Return a generator that counts starting at highest + 1.
highest = reduce(lambda x, y: max(int(x), int(y)), (loc.code for loc in codes))
return iter(range(highest + 1, MAXIMUM_CODE))
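For illustration, a self-contained sketch of the per-ticket allocation scheme described above (hypothetical values; the real function relies on the module-level codes list and read_error_codes):

# Ticket 31 owns error codes 3100-3199; codes already seen are skipped.
seen = {"3100", "3101"}
ticket = 31
gen = (c for c in range(ticket * 100, (ticket + 1) * 100) if str(c) not in seen)
print(next(gen))  # 3102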
|
b2aeef05725137208e8aaef213f1bf68eb673e06
| 3,646,182
|
import asyncio
def getEnergyUsage():
"""Query plug for energy usage data. Runs as async task.
:return: json with device energy data
"""
energy_data = asyncio.run(plug.get_emeter_realtime())
return energy_data
|
43fe5814de6776052c8c48ac65e9df8893956ef6
| 3,646,183
|
def get_sequence_from_kp(midi):
"""
    Get the reduced chord sequence from a KP-corpus MIDI file.
Parameters
==========
midi : pretty_midi
A pretty_midi object representing the piece to parse.
Returns
=======
chords : list
The reduced chord sequence from the given piece.
times : list
The time of each chord in chords.
"""
def convert_chord_kp(chord):
"""
Convert the given chord from a string (read from the KP-corpus), to a tonic and quality.
Parameters
==========
chord : string
A string representation of a chord.
Returns
=======
tonic : int
The tonic of the chord, where 0 represents C. A chord with no tonic returns None here.
quality : int
The quality of chord, where 0 is major, 1 is minor, and 2 is diminished. Others are None.
"""
global tonic_map, accidental_map
chord = chord.split('_')
tonic = tonic_map[chord[0][0]]
if tonic is not None:
for accidental in chord[0][1:]:
tonic += accidental_map[accidental]
tonic %= 12
quality = quality_map[chord[1]]
return tonic, quality
return get_reduced_chord_sequence([convert_chord_kp(lyric.text) for lyric in midi.lyrics],
[lyric.time for lyric in midi.lyrics])
|
fd11658607e44151fbd6eaec08bff4ce510e8ba5
| 3,646,184
|
def get_bw_range(features):
"""
Get the rule-of-thumb bandwidth and a range of bandwidths on a log scale for the Gaussian RBF kernel.
:param features: Features to use to obtain the bandwidths.
:return: Tuple consisting of:
* rule_of_thumb_bw: Computed rule-of-thumb bandwidth.
* bws: List of bandwidths on a log scale.
"""
dists = sklearn.metrics.pairwise.pairwise_distances(features).reshape(-1)
rule_of_thumb_bw = np.median(dists)
gammas = np.logspace(np.log(0.5/np.percentile(dists, 99)**2), np.log(0.5/np.percentile(dists, 1)**2), 10, base=np.e)
bws = np.sqrt(1/(2*gammas))
return rule_of_thumb_bw, bws
|
2e954badf9e529bd0c62449c34a631d5be87950b
| 3,646,185
|
def gen_endpoint(endpoint_name, endpoint_config_name):
"""
Generate the endpoint resource
"""
endpoint = {
"SagemakerEndpoint": {
"Type": "AWS::SageMaker::Endpoint",
"DependsOn": "SagemakerEndpointConfig",
"Properties": {
"EndpointConfigName": {
"Fn::GetAtt": ["SagemakerEndpointConfig", "EndpointConfigName"]
},
"EndpointName": endpoint_name,
"RetainAllVariantProperties": False,
},
},
}
return endpoint
|
bc658e6aebc41cfddefe0e77b2d65748a84789c5
| 3,646,186
|
def read_dataframe(df, smiles_column, name_column, data_columns=None):
"""Read molecules from a dataframe.
Parameters
----------
df : pandas.DataFrame
Dataframe to read molecules from.
smiles_column : str
Key of column containing SMILES strings or rdkit Mol objects.
name_column : str
Key of column containing molecule name strings.
data_columns : list, optional
        A list of column keys containing data to retain
in molecule graph nodes. The default is None.
Returns
-------
DataFrameMolSupplier
"""
return DataFrameMolSupplier(df, smiles_column, name_column, data_columns)
|
d27e01e38132818f0bb740fc0033f22949e9fa79
| 3,646,188
|
from typing import List
def dummy_awsbatch_cluster_config(mocker):
"""Generate dummy cluster."""
image = Image(os="alinux2")
head_node = dummy_head_node(mocker)
compute_resources = [
AwsBatchComputeResource(name="dummy_compute_resource1", instance_types=["dummyc5.xlarge", "optimal"])
]
queue_networking = AwsBatchQueueNetworking(subnet_ids=["dummy-subnet-1"], security_groups=["sg-1", "sg-2"])
queues = [AwsBatchQueue(name="queue1", networking=queue_networking, compute_resources=compute_resources)]
scheduling = AwsBatchScheduling(queues=queues)
# shared storage
shared_storage: List[Resource] = []
shared_storage.append(dummy_fsx())
shared_storage.append(dummy_ebs("/ebs1"))
shared_storage.append(dummy_ebs("/ebs2", volume_id="vol-abc"))
shared_storage.append(dummy_ebs("/ebs3", raid=Raid(raid_type=1, number_of_volumes=5)))
shared_storage.append(dummy_efs("/efs1", file_system_id="fs-efs-1"))
shared_storage.append(dummy_raid("/raid1"))
cluster = _DummyAwsBatchClusterConfig(
image=image, head_node=head_node, scheduling=scheduling, shared_storage=shared_storage
)
cluster.custom_s3_bucket = "s3://dummy-s3-bucket"
cluster.additional_resources = "https://additional.template.url"
cluster.config_version = "1.0"
cluster.iam = ClusterIam()
cluster.tags = [Tag(key="test", value="testvalue")]
return cluster
|
ff9c24e90faf92758a83c3529d261c901b614b01
| 3,646,189
|
def float_or_none(val, default=None):
"""
    Convert `val` to a float, returning `default` if `val` is None
    or cannot be parsed as a float.
"""
if val is None:
return default
else:
try:
ret = float(val)
except ValueError:
ret = default
return ret
|
00beabbd2fe4633e6738fea2220d55d096bfa91e
| 3,646,190
|
def user_get_year_rating(user_id: int):
"""
    Get the stored year and rating for a user
    :param user_id:
    :return: tuple of (year, rating), or None on error
"""
try:
con = psconnect(db_url, sslmode='require')
cursor = con.cursor()
cursor.execute("SELECT year,rating FROM users WHERE uid = %s", (user_id,))
result = cursor.fetchone()
con.close()
return result
except psError as e:
print(e)
|
4911d5d0ea8f99b1d889d048aa31825452b3f3fe
| 3,646,191
|
from pymco import message
def msg_with_data(config, filter_):
"""Creates :py:class:`pymco.message.Message` instance with some data."""
    # Importing here since py-cov will ignore code imported in conftest files
with mock.patch('time.time') as time:
with mock.patch('hashlib.sha1') as sha1:
time.return_value = ctxt.MSG['msgtime']
sha1.return_value.hexdigest.return_value = ctxt.MSG['requestid']
body = {
':action': 'runonce',
':data': {':noop': True, ':process_results': True},
':ssl_msgtime': 1421878604,
':ssl_ttl': 60,
}
return message.Message(body=body,
agent='puppet',
filter_=filter_,
config=config)
|
3bb50370512e58fce6c098ed2144b7406cc6eab4
| 3,646,192
|
def api_connect_wifi():
""" Connect to the specified wifi network """
res = network.wifi_connect()
return jsonify(res)
|
1a8832bb67bb1d5b73357b9b1359f6c1835f3c85
| 3,646,194
|
from typing import List
async def get_sinks_metadata(sinkId: str) -> List: # pylint: disable=unused-argument
"""Get metadata attached to sinks
This adapter does not implement metadata. Therefore this will always result
in an empty list!
"""
return []
|
458b674cc59a80572fd9676aec81d0a7c353a8f3
| 3,646,195
|
def fn_lin(x_np, *, multiplier=3.1416):
""" Linear function """
return x_np * multiplier
|
e64f112b486ea6a0bdf877d67c98417ae90f03b3
| 3,646,196
|
def get_MACD(df, column='Close'):
"""Function to get the EMA of 12 and 26"""
df['EMA-12'] = df[column].ewm(span=12, adjust=False).mean()
df['EMA-26'] = df[column].ewm(span=26, adjust=False).mean()
df['MACD'] = df['EMA-12'] - df['EMA-26']
df['Signal'] = df['MACD'].ewm(span=9, adjust=False).mean()
df['Histogram'] = df['MACD'] - df['Signal']
return df
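A hypothetical call on a small price series (the column name defaults to 'Close'):

import pandas as pd

prices = pd.DataFrame({'Close': [10.0, 10.5, 10.2, 10.8, 11.1, 10.9, 11.4]})
with_macd = get_MACD(prices)
print(with_macd[['MACD', 'Signal', 'Histogram']].round(3))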
|
b5eb25c9a5097fb2a0d874d62b6ab1957bbe3f11
| 3,646,197
|
def from_pyGraphviz_agraph(A, create_using=None):
"""Returns a EasyGraph Graph or DiGraph from a PyGraphviz graph.
Parameters
----------
A : PyGraphviz AGraph
A graph created with PyGraphviz
create_using : EasyGraph graph constructor, optional (default=None)
Graph type to create. If graph instance, then cleared before populated.
If `None`, then the appropriate Graph type is inferred from `A`.
Examples
--------
>>> K5 = eg.complete_graph(5)
>>> A = eg.to_pyGraphviz_agraph(K5)
>>> G = eg.from_pyGraphviz_agraph(A)
Notes
-----
The Graph G will have a dictionary G.graph_attr containing
the default graphviz attributes for graphs, nodes and edges.
Default node attributes will be in the dictionary G.node_attr
which is keyed by node.
Edge attributes will be returned as edge data in G. With
edge_attr=False the edge data will be the Graphviz edge weight
attribute or the value 1 if no edge weight attribute is found.
"""
if create_using is None:
if A.is_directed():
if A.is_strict():
create_using = eg.DiGraph
else:
create_using = eg.MultiDiGraph
else:
if A.is_strict():
create_using = eg.Graph
else:
create_using = eg.MultiGraph
# assign defaults
N = eg.empty_graph(0, create_using)
if A.name is not None:
N.name = A.name
# add graph attributes
N.graph.update(A.graph_attr)
# add nodes, attributes to N.node_attr
for n in A.nodes():
str_attr = {str(k): v for k, v in n.attr.items()}
N.add_node(str(n), **str_attr)
# add edges, assign edge data as dictionary of attributes
for e in A.edges():
u, v = str(e[0]), str(e[1])
attr = dict(e.attr)
str_attr = {str(k): v for k, v in attr.items()}
if not N.is_multigraph():
if e.name is not None:
str_attr["key"] = e.name
N.add_edge(u, v, **str_attr)
else:
N.add_edge(u, v, key=e.name, **str_attr)
# add default attributes for graph, nodes, and edges
# hang them on N.graph_attr
N.graph["graph"] = dict(A.graph_attr)
N.graph["node"] = dict(A.node_attr)
N.graph["edge"] = dict(A.edge_attr)
return N
|
66f57a2864f87342c84452336da647fb7489ec66
| 3,646,198
|
from typing import Collection
import copy
def get_textbox_rectangle_from_pane(pane_rectangle: GeometricRectangle, texts: Collection[str],
direction: str) -> GeometricRectangle:
"""
Args:
pane_rectangle:
texts:
direction:
Returns:
"""
num_boxes: int = len(texts)
dimensions = copy.deepcopy(pane_rectangle.dimensions)
if direction == 'right':
dimensions.width /= num_boxes
elif direction == 'down':
dimensions.height /= num_boxes
else:
raise InvalidDirectionError(f'direction must be "right" or "down": {direction}')
return GeometricRectangle(top_left=pane_rectangle.top_left,
dimensions=dimensions)
|
9e0a3bb2f0a93312d17096fe36d1b0529b5b47e6
| 3,646,199
|
def spawn_shell(shell_cmd):
"""Spawn a shell process with the provided command line. Returns the Pexpect object."""
return pexpect.spawn(shell_cmd[0], shell_cmd[1:], env=build_shell_env())
|
07de3ae221b427baddb0b47f1d52e3eae91035e7
| 3,646,200
|
from collections import OrderedDict
import numpy as np
import scipy.ndimage as nd
import sep
from grizli import utils
from tqdm import tqdm
def analyze_image(data, err, seg, tab, athresh=3.,
robust=False, allow_recenter=False,
prefix='', suffix='', grow=1,
subtract_background=False, include_empty=False,
pad=0, dilate=0, make_image_cols=True):
"""
SEP/SExtractor analysis on arbitrary image
Parameters
----------
data : array
Image array
err : array
RMS error array
seg : array
Segmentation array
tab : `~astropy.table.Table`
Table output from `sep.extract` where `id` corresponds to segments in
`seg`. Requires at least columns of
``id, xmin, xmax, ymin, ymax`` and ``x, y, flag`` if want to use
`robust` estimators
athresh : float
Analysis threshold
prefix, suffix : str
Prefix and suffix to add to output table column names
Returns
-------
tab : `~astropy.table.Table`
Table with columns
``id, x, y, x2, y2, xy, a, b, theta``
``flux, background, peak, xpeak, ypeak, npix``
"""
yp, xp = np.indices(data.shape) - 0.5*(grow == 2)
# Output data
new = OrderedDict()
idcol = choose_column(tab, ['id','number'])
ids = tab[idcol]
new[idcol] = tab[idcol]
for k in ['x','y','x2','y2','xy','a','b','theta','peak','flux','background']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=np.float32)
for k in ['xpeak','ypeak','npix','flag']:
if k in tab.colnames:
new[k] = tab[k].copy()
else:
new[k] = np.zeros(len(tab), dtype=int)
for id_i in tqdm(ids):
ix = np.where(tab[idcol] == id_i)[0][0]
xmin = tab['xmin'][ix]-1-pad
ymin = tab['ymin'][ix]-1-pad
slx = slice(xmin, tab['xmax'][ix]+pad+2)
sly = slice(ymin, tab['ymax'][ix]+pad+2)
seg_sl = seg[sly, slx] == id_i
if include_empty:
seg_sl |= seg[sly, slx] == 0
if dilate > 0:
seg_sl = nd.binary_dilation(seg_sl, iterations=dilate)
if seg_sl.sum() == 0:
new['flag'][ix] |= 1
continue
if grow > 1:
sh = seg_sl.shape
seg_gr = np.zeros((sh[0]*grow, sh[1]*grow), dtype=bool)
for i in range(grow):
for j in range(grow):
seg_gr[i::grow, j::grow] |= seg_sl
seg_sl = seg_gr
xmin = xmin*grow
ymin = ymin*grow
slx = slice(xmin, (tab['xmax'][ix]+2+pad)*grow)
sly = slice(ymin, (tab['ymax'][ix]+2+pad)*grow)
if subtract_background:
if subtract_background == 2:
# Linear model
x = xp[sly, slx] - xmin
y = yp[sly, slx] - ymin
A = np.array([x[~seg_sl]*0.+1, x[~seg_sl], y[~seg_sl]])
b = data[sly,slx][~seg_sl]
lsq = np.linalg.lstsq(A.T, b)
back_level = lsq[0][0]
A = np.array([x[seg_sl]*0.+1, x[seg_sl], y[seg_sl]]).T
back_xy = A.dot(lsq[0])
else:
# Median
back_level = np.median(data[sly, slx][~seg_sl])
back_xy = back_level
else:
back_level = 0.
back_xy = back_level
dval = data[sly, slx][seg_sl] - back_xy
ival = err[sly, slx][seg_sl]
rv = dval.sum()
imax = np.argmax(dval)
peak = dval[imax]
x = xp[sly, slx][seg_sl] - xmin
y = yp[sly, slx][seg_sl] - ymin
xpeak = x[imax] + xmin
ypeak = y[imax] + ymin
thresh_sl = (dval > athresh*ival) & (ival >= 0)
new['npix'][ix] = thresh_sl.sum()
new['background'][ix] = back_level
if new['npix'][ix] == 0:
new['flag'][ix] |= 2
new['x'][ix] = np.nan
new['y'][ix] = np.nan
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = np.nan
new['y2'][ix] = np.nan
            new['xy'][ix] = np.nan
new['a'][ix] = np.nan
new['b'][ix] = np.nan
new['theta'][ix] = np.nan
continue
cval = dval[thresh_sl]
rv = cval.sum()
x = x[thresh_sl]
y = y[thresh_sl]
mx = (x*cval).sum()
my = (y*cval).sum()
mx2 = (x*x*cval).sum()
my2 = (y*y*cval).sum()
mxy = (x*y*cval).sum()
xm = mx/rv
ym = my/rv
xm2 = mx2/rv - xm**2
ym2 = my2/rv - ym**2
xym = mxy/rv - xm*ym
if robust:
if 'flag' in tab.colnames:
flag = tab['flag'][ix] & sep.OBJ_MERGED
else:
flag = False
if flag | (robust > 1):
if allow_recenter:
xn = xm
yn = ym
else:
xn = tab['x'][ix]-xmin
yn = tab['y'][ix]-ymin
xm2 = mx2 / rv + xn*xn - 2*xm*xn
ym2 = my2 / rv + yn*yn - 2*ym*yn
xym = mxy / rv + xn*yn - xm*yn - xn*ym
xm = xn
ym = yn
temp2 = xm2*ym2-xym*xym
if temp2 < 0.00694:
xm2 += 0.0833333
ym2 += 0.0833333
            temp2 = xm2*ym2-xym*xym
temp = xm2 - ym2
if np.abs(temp) > 0:
theta = np.clip(np.arctan2(2.0*xym, temp)/2.,
-np.pi/2.+1.e-5, np.pi/2.-1.e-5)
else:
theta = np.pi/4
        temp = np.sqrt(0.25*temp*temp+xym*xym)
        pmy2 = pmx2 = 0.5*(xm2+ym2)
pmx2 += temp
pmy2 -= temp
amaj = np.sqrt(pmx2)
amin = np.sqrt(pmy2)
new['x'][ix] = xm+xmin
new['y'][ix] = ym+ymin
new['xpeak'][ix] = xpeak
new['ypeak'][ix] = ypeak
new['peak'][ix] = peak
new['flux'][ix] = rv
new['x2'][ix] = xm2
new['y2'][ix] = ym2
        new['xy'][ix] = xym
new['a'][ix] = amaj
new['b'][ix] = amin
new['theta'][ix] = theta
new['flag'] |= ((~np.isfinite(new['a'])) | (new['a'] <= 0))*4
new['flag'] |= ((~np.isfinite(new['b'])) | (new['b'] <= 0))*8
newt = utils.GTable()
for k in new:
newt[f'{prefix}{k}{suffix}'] = new[k]
if make_image_cols:
newt['a_image'] = newt['a']
newt['b_image'] = newt['b']
newt['theta_image'] = newt['theta']
newt['x_image'] = newt['x']+1
newt['y_image'] = newt['y']+1
return newt
|
b90920ac00a995fde90c43b07cae45a752786c15
| 3,646,201
|
import pandas as pd
def get_experiment_table(faultgroup, faultname, tablename):
    """
    Get any table from a faultgroup
    """
node = faultgroup._f_get_child(faultname)
table = node._f_get_child(tablename)
return pd.DataFrame(table.read())
|
16c78e6925d3da227402e2e17e00bac38ff72ab7
| 3,646,203
|
def kjunSeedList(baseSeed, n):
"""
generates n seeds
Due to the way it generates the seed, do not use i that is too large..
"""
assert n <= 100000
rs = ra.RandomState(baseSeed);
randVals = rs.randint(np.iinfo(np.uint32).max+1, size=n);
return randVals;
|
6d98adfba2917ede64d0572c21d6fe1041327241
| 3,646,204
|
import PIL
def filter_sharpen(image):
"""Apply a sharpening filter kernel to the image.
This is the same as using PIL's ``PIL.ImageFilter.SHARPEN`` kernel.
Added in 0.4.0.
**Supported dtypes**:
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: no
* ``float64``: no
* ``float128``: no
* ``bool``: no
Parameters
----------
image : ndarray
The image to modify.
Returns
-------
ndarray
Sharpened image.
"""
return _filter_by_kernel(image, PIL.ImageFilter.SHARPEN)
|
28b83153dc8931430e22f63f889cf195f01f80da
| 3,646,205
|
async def zha_client(hass, config_entry, zha_gateway, hass_ws_client):
"""Test zha switch platform."""
# load the ZHA API
async_load_api(hass)
# create zigpy device
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id],
[],
None,
zha_gateway,
)
await async_init_zigpy_device(
hass,
[general.OnOff.cluster_id, general.Basic.cluster_id, general.Groups.cluster_id],
[],
zigpy.profiles.zha.DeviceType.ON_OFF_LIGHT,
zha_gateway,
manufacturer="FakeGroupManufacturer",
model="FakeGroupModel",
ieee="01:2d:6f:00:0a:90:69:e8",
)
# load up switch domain
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
await hass.config_entries.async_forward_entry_setup(config_entry, light_domain)
await hass.async_block_till_done()
return await hass_ws_client(hass)
|
ba659195dc2e3d8d3510c25edcf4850a740483c1
| 3,646,206
|
import random
def summary_selector(summary_models=None):
"""
    Creates a function that takes as input a dict of summaries:
    {'T5': [str] summary_generated_by_T5, ..., 'KW': [str] summary_generated_by_KW}
    and returns the summary generated by one model picked at random from summary_models.
    If summary_models is None or empty, the returned function always returns an empty string.
:param summary_models: list of str(SummarizerModel)
:return: function [dict] -> [str]
"""
if summary_models is None or len(summary_models) == 0 or \
(len(summary_models) == 1 and summary_models[0] == ""):
return lambda x: ""
summary_model = random.choice(summary_models)
return lambda summaries_dict: summaries_dict[summary_model]
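A small usage sketch (note that the model is picked once when the selector is created, not on every call):

selector = summary_selector(['T5', 'KW'])
summaries = {'T5': 'summary from T5', 'KW': 'summary from KW'}
print(selector(summaries))          # either 'summary from T5' or 'summary from KW'
print(summary_selector(None)({}))   # '' (no summary models configured)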
|
b8a2336546324d39ff87ff5b59f4f1174e5dd54c
| 3,646,207
|
import collections
import datetime
from natsort import natsorted
def handle_collectd(root_dir):
    """Generate a figure for each plugin of each host."""
result = collections.defaultdict(lambda: collections.defaultdict(dict))
for host in natsorted(root_dir.iterdir()):
for plugin in natsorted(host.iterdir()):
stats_list = natsorted(
[fname for fname in plugin.iterdir() if fname.suffix == ".rrd"]
)
title = plugin.name
result[host.name][plugin.name] = {
"daily": rrd2svg(
stats_list,
f"{title} - by day",
start_time=datetime.datetime.now() - datetime.timedelta(days=1),
),
"monthly": rrd2svg(
stats_list,
f"{title} - by month",
start_time=datetime.datetime.now() - datetime.timedelta(weeks=4),
),
}
if len(result[host.name]) > 20:
break
return result
|
48eb4f2ad5976d51fbe1904219e90620aea1a82c
| 3,646,208
|
def create_policy_case_enforcement(repository_id, blocking, enabled,
organization=None, project=None, detect=None):
"""Create case enforcement policy.
"""
organization, project = resolve_instance_and_project(
detect=detect, organization=organization, project=project)
policy_client = get_policy_client(organization)
configuration = create_configuration_object(repository_id, None, blocking, enabled,
'40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e',
['enforceConsistentCase'],
['true'])
return policy_client.create_policy_configuration(configuration=configuration, project=project)
|
60864cd51472029991a4bb783a39007ea42e4b58
| 3,646,209
|
def svn_fs_new(*args):
"""svn_fs_new(apr_hash_t fs_config, apr_pool_t pool) -> svn_fs_t"""
    return _fs.svn_fs_new(*args)
|
6ade0887b16e522d47d70c974ccecf8f8bec1403
| 3,646,210
|
def least_similar(sen, voting_dict):
"""
Find senator with voting record least similar, excluding the senator passed
:param sen: senator last name
:param voting_dict: dictionary of voting record by last name
:return: senator last name with least similar record, in case of a tie chooses first alphabetically
>>> voting_dict = create_voting_dict(list(open('voting_record_dump109.txt')))
>>> least_similar('Mikulski', voting_dict)
'Inhofe'
>>> least_similar('Santorum', voting_dict) # 2.12.5
'Feingold'
"""
return specifier_similar(sen, voting_dict, '<')['sen']
|
8bcc8cde75e9ce060f852c0e7e03756d279491f0
| 3,646,212
|
import logging
def _send_req(wait_sec, url, req_gen, retry_result_code=None):
""" Helper function to send requests and retry when the endpoint is not ready.
Args:
wait_sec: int, max time to wait and retry in seconds.
url: str, url to send the request, used only for logging.
req_gen: lambda, no parameter function to generate requests.Request for the
function to send to the endpoint.
retry_result_code: int (optional), status code to match or retry the request.
Returns:
requests.Response
"""
def retry_on_error(e):
return isinstance(e, (SSLError, ReqConnectionError))
# generates function to see if the request needs to be retried.
# if param `code` is None, will not retry and directly pass back the response.
# Otherwise will retry if status code is not matched.
def retry_on_result_func(code):
if code is None:
return lambda _: False
return lambda resp: not resp or resp.status_code != code
@retry(stop_max_delay=wait_sec * 1000, wait_fixed=10 * 1000,
retry_on_exception=retry_on_error,
retry_on_result=retry_on_result_func(retry_result_code))
def _send(url, req_gen):
resp = None
logging.info("sending request to %s", url)
try:
resp = req_gen()
except Exception as e:
logging.warning("%s: request with error: %s", url, e)
raise e
return resp
return _send(url, req_gen)
|
d6856bf241f857f3acd8768fd71d058d4c94baaa
| 3,646,213
|
def load_file(path, types=None):
    """
    Load the file at path if its extension is in the types list.
    ----
    :param path: file path
    :param types: list of file extensions to load, such as ['txt', 'xlsx']; if None, load all files
    :return: a list [path, data], or None if the file is filtered out or its format is unsupported
"""
ext = path.split(".")[-1]
    if types is not None:
if ext not in types: # filter this file
return None
if ext == "txt":
return [path, __load_txt(path)]
else:
print("pyftools: format", ext, "not support!")
return None
|
dab404cf2399e3b87d23babb1a09be2b94c3d924
| 3,646,214
|
import numpy
def get_dense_labels_map(values, idx_dtype='uint32'):
"""
convert unique values into dense int labels [0..n_uniques]
:param array values: (n,) dtype array
:param dtype? idx_dtype: (default: 'uint32')
:returns: tuple(
labels2values: (n_uniques,) dtype array,
values2labels: HashMap(dtype->int),
)
"""
# get unique values
unique_values = unique(values)
# build labels from 0 to n_uniques
labels = numpy.arange(unique_values.shape[0], dtype=idx_dtype)
# build small hashmap with just the unique items
values2labels = Hashmap(unique_values, labels)
return unique_values, values2labels
|
cd9f2884e26fa22785e24598f0f485d2931427d8
| 3,646,215
|
from typing import Match
def _replace_fun_unescape(m: Match[str]) -> str:
""" Decode single hex/unicode escapes found in regex matches.
Supports single hex/unicode escapes of the form ``'\\xYY'``,
``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only
decodes if there is an odd number of backslashes.
.. versionadded:: 0.2
Parameters
----------
m : regex match
Returns
-------
c : str
The unescaped character.
"""
slsh = b'\\'.decode('ascii')
s = m.group(0)
count = s.count(slsh)
if count % 2 == 0:
return s
else:
c = chr(int(s[(count + 1):], base=16))
return slsh * (count - 1) + c
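An illustrative use with re.sub; the pattern below is a stand-in for the module's real escape regex, not part of this snippet:

import re

# stand-in pattern for \xYY, \uYYYY and \UYYYYYYYY escapes preceded by backslashes
escape_re = re.compile(r'\\+(?:x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})')
print(re.sub(escape_re, _replace_fun_unescape, 'caf\\xe9'))    # -> 'café' (odd backslash count)
print(re.sub(escape_re, _replace_fun_unescape, 'caf\\\\xe9'))  # unchanged (even backslash count)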
|
3fdb275e3c15697e5302a6576b4d7149016299c0
| 3,646,217
|
def predict_next_location(game_data, ship_name):
"""
Predict the next location of a space ship.
Parameters
----------
    game_data: data of the game (dict).
    ship_name: name of the spaceship whose next location is predicted (str).
    Return
    ------
    predicted_location : predicted location of the spaceship (tuple(int, int)).
Version
-------
Specification: Nicolas Van Bossuyt (v1. 19/03/17).
Implementation: Nicolas Van Bossuyt (v1. 19/03/17).
Bayron Mahy (v2. 22/03/17).
"""
ship_location = game_data['ships'][ship_name]['location']
ship_facing = game_data['ships'][ship_name]['facing']
ship_speed = game_data['ships'][ship_name]['speed']
return next_location(ship_location, ship_facing, ship_speed, game_data['board_size'])
|
996b58e0ac8d8754a49020e0e5df830fa472be99
| 3,646,218
|
def check_answer(guess, a_follower, b_follower):
"""Chcek if the user guessed the correct option"""
if a_follower > b_follower:
return guess == "a"
else:
return guess == "b"
|
acd1e78026f89dd1482f4471916472d35edf68a7
| 3,646,220
|
def respond(variables, Body=None, Html=None, **kwd):
"""
Does the grunt work of cooking up a MailResponse that's based
on a template. The only difference from the lamson.mail.MailResponse
class and this (apart from variables passed to a template) are that
instead of giving actual Body or Html parameters with contents,
you give the name of a template to render. The kwd variables are
the remaining keyword arguments to MailResponse of From/To/Subject.
For example, to render a template for the body and a .html for the Html
attachment, and to indicate the From/To/Subject do this:
msg = view.respond(locals(), Body='template.txt',
Html='template.html',
From='test@test.com',
To='receiver@test.com',
Subject='Test body from "%(dude)s".')
In this case you're using locals() to gather the variables needed for
the 'template.txt' and 'template.html' templates. Each template is
setup to be a text/plain or text/html attachment. The From/To/Subject
are setup as needed. Finally, the locals() are also available as
simple Python keyword templates in the From/To/Subject so you can pass
in variables to modify those when needed (as in the %(dude)s in Subject).
"""
assert Body or Html, "You need to give either the Body or Html template of the mail."
for key in kwd:
kwd[key] = kwd[key] % variables
msg = mail.MailResponse(**kwd)
if Body:
msg.Body = render(variables, Body)
if Html:
msg.Html = render(variables, Html)
return msg
|
214513edf420dc629603cc98d1728dec8c81aee9
| 3,646,222
|
from typing import Dict
def canonical_for_code_system(jcs: Dict) -> str:
"""get the canonical URL for a code system entry from the art decor json. Prefer FHIR URIs over the generic OID URI.
Args:
jcs (Dict): the dictionary describing the code system
Returns:
str: the canonical URL
"""
if "canonicalUriR4" in jcs:
return jcs["canonicalUriR4"]
else:
return jcs["canonicalUri"]
|
f111a4cb65fa75799e799f0b088180ef94b71cc8
| 3,646,223
|
def correspdesc_source(data):
"""
extract @source from TEI elements <correspDesc>
"""
correspdesc_data = correspdesc(data)
try:
return [cd.attrib["source"].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
try:
return [cd.attrib[ns_cs("source")].replace("#", "") for cd in correspdesc_data]
except KeyError:
pass
return []
|
18a2fe1d0daf0f383c8b8295105ad0027b626f31
| 3,646,224
|
def leaders(Z, T):
"""
(L, M) = leaders(Z, T):
For each flat cluster j of the k flat clusters represented in the
n-sized flat cluster assignment vector T, this function finds the
lowest cluster node i in the linkage tree Z such that:
* leaf descendents belong only to flat cluster j (i.e. T[p]==j
for all p in S(i) where S(i) is the set of leaf ids of leaf
nodes descendent with cluster node i)
* there does not exist a leaf that is not descendent with i
that also belongs to cluster j (i.e. T[q]!=j for all q not in S(i)).
If this condition is violated, T is not a valid cluster assignment
vector, and an exception will be thrown.
Two k-sized numpy vectors are returned, L and M. L[j]=i is the linkage
cluster node id that is the leader of flat cluster with id M[j]. If
i < n, i corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
"""
Z = np.asarray(Z)
T = np.asarray(T)
    if not isinstance(T, np.ndarray) or T.dtype != np.int_:
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype=np.int32)
M = np.zeros((kk,), dtype=np.int32)
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _cluster_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError('T is not a valid assignment vector. Error found when examining linkage node %d (< 2n-1).' % s)
return (L, M)
|
7b72b33b87e454138c144a791612d7af8422b0a6
| 3,646,225
|
from typing import List
def create_unique_views(rows: list, fields: List[str]):
"""Create views for each class objects, default id should be a whole row"""
views = {}
for r in rows:
values = [r[cname] for cname in fields]
if any(isinstance(x, list) for x in values):
if all(isinstance(x, list) for x in values) and len({len(x) for x in values}) == 1:
# all its value is in a list
for j in range(len(values[0])):
key = ",".join(str(values[i][j]) for i in range(len(values)))
views[key] = [values[i][j] for i in range(len(values))]
else:
# assert False
key = ",".join((str(x) for x in values))
views[key] = values
else:
key = ",".join((str(x) for x in values))
views[key] = values
views = [{cname: r[i] for i, cname in enumerate(fields)} for r in views.values()]
return views
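A worked example of how list-valued fields are expanded element-wise (hypothetical rows):

rows = [{'id': 1, 'tag': ['a', 'b'], 'val': [10, 20]},
        {'id': 2, 'tag': ['a'], 'val': [10]}]
print(create_unique_views(rows, ['tag', 'val']))
# -> [{'tag': 'a', 'val': 10}, {'tag': 'b', 'val': 20}]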
|
24b311c8b013f742e69e7067c1f1bafe0044c940
| 3,646,226
|
from typing import List
import torch
def check_shape_function(invocations: List[Invocation]):
"""Decorator that automatically tests a shape function.
The shape function, which is expected to be named systematically with
`〇` instead of `.`, is tested against the corresponding op in
`torch.ops.*` function using the given invocations.
"""
def decorator(f):
# `torch.ops.*` functions are overloaded already, so we don't need
# to pass in the overload name.
ns, unqual = f.__name__.split("〇")[:2]
op = getattr(getattr(torch.ops, ns), unqual)
for invocation in invocations:
shape_fn_error, op_error = None, None
try:
result_shapes = _normalize_multiple_results_to_list(f(
*invocation.to_shape_function_args(),
**invocation.kwargs))
except Exception as e:
shape_fn_error = f"{e}"
try:
golden_results = _normalize_multiple_results_to_list(op(
*invocation.to_real_op_args(),
**invocation.kwargs))
except Exception as e:
op_error = f"{e}"
def report(error_message: str):
raise ValueError(f"For shape function {f.__name__!r} with invocation {invocation}: {error_message}")
# Check for error behavior.
if invocation.is_expected_to_raise_exception():
if shape_fn_error is None and op_error is None:
report(f"Expected to raise an exception, but neither shape function nor op raised an exception")
if shape_fn_error is None:
report(f"Op raised error {op_error!r}, but shape function did not.")
if op_error is None:
report(f"Shape function raised error {shape_fn_error!r}, but op did not.")
else:
if shape_fn_error is not None and op_error is not None:
report(f"Both shape function and op raised errors, but were not expected to. Shape function raised error {shape_fn_error!r} and op raised error {op_error!r}.")
if shape_fn_error is not None:
report(f"Shape function raised error {shape_fn_error!r} but op did not raise any error.")
if op_error is not None:
report(f"Op raised error {op_error!r} but shape function did not raise any error.")
if shape_fn_error is not None or op_error is not None:
# If both raised errors, then that is good -- the shape function
# and the real op should agree on the erroneous cases.
# The exact error message might differ though.
if shape_fn_error is not None and op_error is not None:
continue
# Check for matching results.
if len(result_shapes) != len(golden_results):
report(f"Expected {len(golden_results)} result shapes, got {len(result_shapes)}")
for result_shape, golden_result in zip(result_shapes, golden_results):
for dimension_size, golden_dimension_size in zip(result_shape, golden_result.shape):
if dimension_size != golden_dimension_size:
report(f"Expected result shape {golden_result.shape}, got {result_shape}")
return f
return decorator
|
be237f2209f1e2007b53a1ecbe0c277bf2b37fe7
| 3,646,227
|
def _prepare_images(ghi, clearsky, daytime, interval):
"""Prepare data as images.
Performs pre-processing steps on `ghi` and `clearsky` before
returning images for use in the shadow detection algorithm.
Parameters
----------
ghi : Series
Measured GHI. [W/m^2]
clearsky : Series
Expected clearsky GHI. [W/m^2]
daytime : Series
Boolean series with True for daytime and False for night.
interval : int
Time between data points in `ghi`. [minutes]
Returns
-------
ghi_image : np.ndarray
Image form of `ghi`
clearsky_image : np.ndarray
Image form of `clearsky`
clouds_image : np.ndarray
Image of the cloudy periods in `ghi`
image_times : pandas.DatetimeIndex
Index for the data included in the returned images. Leading
and trailing days with incomplete data are not included in the
image, these times are needed to build a Series from the image
later on.
"""
# Fill missing times by interpolation. Missing data at the
# beginning or end of the series is not filled in, and will be
# excluded from the images used for shadow detection.
image_width = 1440 // interval
ghi = ghi.interpolate(limit_area='inside')
# drop incomplete days.
ghi = ghi[ghi.resample('D').transform('count') == image_width]
image_times = ghi.index
ghi_image = _to_image(ghi.to_numpy(), image_width)
scaled_ghi = (ghi * 1000) / np.max(_smooth(ghi_image))
scaled_clearsky = (clearsky * 1000) / clearsky.max()
scaled_clearsky = scaled_clearsky.reindex_like(scaled_ghi)
daytime = daytime.reindex_like(scaled_ghi)
# Detect clouds.
window_size = 50 // interval
clouds = _detect_clouds(scaled_ghi, scaled_clearsky, window_size)
cloud_mask = _to_image(clouds.to_numpy(), image_width)
# Interpolate across days (i.e. along columns) to remove clouds
# replace clouds with nans
#
# This could probably be done directly with scipy.interpolate.inter1d,
# but the easiest approach is to turn the image into a dataframe and
# interpolate along the columns.
cloudless_image = ghi_image.copy()
cloudless_image[cloud_mask] = np.nan
clouds_image = ghi_image.copy()
clouds_image[~cloud_mask] = np.nan
ghi_image = pd.DataFrame(cloudless_image).interpolate(
axis=0,
limit_direction='both'
).to_numpy()
# set night to nan
ghi_image[~_to_image(daytime.to_numpy(), image_width)] = np.nan
return (
ghi_image,
_to_image(scaled_clearsky.to_numpy(), image_width),
clouds_image,
image_times
)
|
9433cce0ccb9dae5e5b364fce42f8ed391adf239
| 3,646,228
|
import numpy
def interleaved_code(modes: int) -> BinaryCode:
""" Linear code that reorders orbitals from even-odd to up-then-down.
In up-then-down convention, one can append two instances of the same
code 'c' in order to have two symmetric subcodes that are symmetric for
spin-up and -down modes: ' c + c '.
In even-odd, one can concatenate with the interleaved_code
to have the same result:' interleaved_code * (c + c)'.
    This code changes the order of modes from (0, 1, 2, ..., modes-1)
    to (0, modes/2, 1, modes/2+1, ..., modes/2 - 1, modes-1).
n_qubits = n_modes.
Args: modes (int): number of modes, must be even
Returns (BinaryCode): code that interleaves orbitals
"""
if modes % 2 == 1:
raise ValueError('number of modes must be even')
else:
mtx = numpy.zeros((modes, modes), dtype=int)
for index in numpy.arange(modes // 2, dtype=int):
mtx[index, 2 * index] = 1
mtx[modes // 2 + index, 2 * index + 1] = 1
return BinaryCode(mtx, linearize_decoder(mtx.transpose()))
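For modes=4 the underlying permutation matrix looks as follows (only the matrix construction is shown; the BinaryCode wrapping is omitted):

import numpy

modes = 4
mtx = numpy.zeros((modes, modes), dtype=int)
for index in numpy.arange(modes // 2, dtype=int):
    mtx[index, 2 * index] = 1
    mtx[modes // 2 + index, 2 * index + 1] = 1
print(mtx)
# [[1 0 0 0]
#  [0 0 1 0]
#  [0 1 0 0]
#  [0 0 0 1]]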
|
e9b178165c8fe1e33d880dee056a3e397fa90bce
| 3,646,229
|
from sklearn.neighbors import NearestNeighbors
def nearest_neighbors(data, args):
"""
    Nearest neighbors
"""
nbrs = NearestNeighbors(**args)
nbrs.fit(data)
    # Compute the nearest-neighbor indices and distances for the test data
# distances, indices = nbrs.kneighbors(test_data)
return nbrs
|
d91014d082f7a15a26e453d32272381b7578c9de
| 3,646,230
|
def gradient(v, surf):
"""
:param v: vector of x, y, z coordinates
    :param surf: which implicit surface is being used to approximate the structure of this phase
    :return: The gradient vector (which is normal to the surface at v)
"""
x = v[0]
y = v[1]
z = v[2]
if surf == 'Ia3d' or surf == 'gyroid' or surf == 'ia3d':
a = np.cos(x)*np.cos(y) - np.sin(x)*np.sin(z)
b = -np.sin(y)*np.sin(x) + np.cos(y)*np.cos(z)
c = -np.sin(y)*np.sin(z) + np.cos(z)*np.cos(x)
elif surf == 'Pn3m' or surf == 'pn3m':
a = np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z)
b = np.sin(x)*np.cos(y)*np.sin(z) - np.sin(x)*np.sin(y)*np.cos(z) + np.cos(x)*np.cos(y)*np.cos(z) - np.cos(x)*np.sin(y)*np.sin(z)
c = np.sin(x)*np.sin(y)*np.cos(z) - np.sin(x)*np.cos(y)*np.sin(z) - np.cos(x)*np.sin(y)*np.sin(z) + np.cos(x)*np.cos(y)*np.cos(z)
elif surf == 'sphere':
a = 2*x
b = 2*y
c = 2*z
return np.array([a, b, c])
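As a sanity check, the 'gyroid' branch matches the analytic gradient of sin(x)cos(y) + sin(y)cos(z) + sin(z)cos(x); a quick finite-difference comparison, assuming the example shares a module with gradient so numpy is available to it:

import numpy as np

def gyroid(p):
    x, y, z = p
    return np.sin(x)*np.cos(y) + np.sin(y)*np.cos(z) + np.sin(z)*np.cos(x)

v = np.array([0.3, 1.1, 2.0])
eps = 1e-6
fd = np.array([(gyroid(v + eps*e) - gyroid(v - eps*e)) / (2*eps) for e in np.eye(3)])
print(np.allclose(gradient(v, 'gyroid'), fd, atol=1e-5))  # True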
|
2105c491f122508531816d15146801b0dd1c9b75
| 3,646,231
|
def authenticated_client(client, user):
"""
"""
client.post(
'/login',
data={'username': user.username, 'password': 'secret'},
follow_redirects=True,
)
return client
|
5f96ef56179848f7d348ffda67fc08cfccb080ed
| 3,646,232
|
def viterbi(obs, states, start_p, trans_p, emit_p):
"""
請參考李航書中的算法10.5(維特比算法)
HMM共有五個參數,分別是觀察值集合(句子本身, obs),
狀態值集合(all_states, 即trans_p.keys()),
初始機率(start_p),狀態轉移機率矩陣(trans_p),發射機率矩陣(emit_p)
此處的states是為char_state_tab_P,
這是一個用來查詢漢字可能狀態的字典
此處沿用李航書中的符號,令T=len(obs),令N=len(trans_p.keys())
"""
"""
維特比算法第1步:初始化
"""
#V:李航書中的delta,在時刻t狀態為i的所有路徑中之機率最大值
V = [{}] # tabular
#李航書中的Psi,T乘N維的矩陣
#表示在時刻t狀態為i的所有單個路徑(i_1, i_2, ..., i_t-1, i)中概率最大的路徑的第t-1個結點
mem_path = [{}]
#共256種狀態,所謂"狀態"是:"分詞標籤(BMES)及詞性(v, n, nr, d, ...)的組合"
all_states = trans_p.keys()
#obs[0]表示句子的第一個字
#states.get(obs[0], all_states)表示該字可能是由哪些狀態發射出來的
for y in states.get(obs[0], all_states): # init
#在時間點0,狀態y的log機率為:
#一開始在y的log機率加上在狀態y發射obs[0]觀察值的log機率
V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)
#時間點0在狀態y,則前一個時間點會在哪個狀態
mem_path[0][y] = ''
"""
維特比算法第2步:遞推
"""
#obs: 觀察值序列
for t in xrange(1, len(obs)):
V.append({})
mem_path.append({})
#prev_states = get_top_states(V[t-1])
#mem_path[t - 1].keys(): 前一個時間點在什麼狀態,這裡以x代表
#只有在len(trans_p[x])>0(即x有可能轉移到其它狀態)的情況下,prev_states才保留x
prev_states = [
x for x in mem_path[t - 1].keys() if len(trans_p[x]) > 0]
#前一個狀態是x(prev_states中的各狀態),那麼現在可能在什麼狀態(y)
prev_states_expect_next = set(
(y for x in prev_states for y in trans_p[x].keys()))
#set(states.get(obs[t], all_states)):句子的第t個字可能在什麼狀態
#prev_states_expect_next:由前一個字推斷,當前的字可能在什麼狀態
#obs_states:以上兩者的交集
obs_states = set(
states.get(obs[t], all_states)) & prev_states_expect_next
#如果交集為空,則依次選取prev_states_expect_next或all_states
if not obs_states:
obs_states = prev_states_expect_next if prev_states_expect_next else all_states
for y in obs_states:
#李航書中的公式10.45
#y0表示前一個時間點的狀態
#max的參數是一個list of tuple: [(機率1,狀態1),(機率2,狀態2),...]
#V[t - 1][y0]:時刻t-1在狀態y0的機率對數
#trans_p[y0].get(y, MIN_INF):由狀態y0轉移到y的機率對數
#emit_p[y].get(obs[t], MIN_FLOAT):在狀態y發射出觀測值obs[t]的機率對數
#三項之和表示在時刻t由狀態y0到達狀態y的路徑的機率對數
prob, state = max((V[t - 1][y0] + trans_p[y0].get(y, MIN_INF) +
emit_p[y].get(obs[t], MIN_FLOAT), y0) for y0 in prev_states)
#挑選機率最大者將之記錄於V及mem_path
V[t][y] = prob
#時刻t在狀態y,則時刻t-1最有可能在state這個狀態
mem_path[t][y] = state
"""
維特比算法第3步:終止
"""
#mem_path[-1].keys():最後一個時間點可能在哪些狀態
#V[-1][y]:最後一個時間點在狀態y的機率
#把mem_path[-1]及V[-1]打包成一個list of tuple
last = [(V[-1][y], y) for y in mem_path[-1].keys()]
# if len(last)==0:
# print obs
#最後一個時間點最有可能在狀態state,其機率為prob
#在jieba/finalseg/__init__.py的viterbi函數中有限制句子末字的分詞標籤需為E或S
#這裡怎麼沒做這個限制?
prob, state = max(last)
"""
維特比算法第4步:最優路徑回溯
"""
route = [None] * len(obs)
i = len(obs) - 1
while i >= 0:
route[i] = state
#時間點i在狀態state,則前一個時間點最有可能在狀態mem_path[i][state]
state = mem_path[i][state]
i -= 1
return (prob, route)
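A hypothetical toy run, assuming it lives in the same module as viterbi so that the MIN_FLOAT and MIN_INF constants (normally defined by jieba at module level) resolve:

import math

MIN_FLOAT = -3.14e100
MIN_INF = float('-inf')

obs = 'AB'
# which (tag, POS) states each character may be emitted from
states = {'A': [('B', 'n'), ('S', 'n')], 'B': [('E', 'n'), ('B', 'n')]}
start_p = {('B', 'n'): math.log(0.7), ('S', 'n'): math.log(0.3)}
trans_p = {('B', 'n'): {('E', 'n'): math.log(1.0)},
           ('S', 'n'): {('B', 'n'): math.log(1.0)},
           ('E', 'n'): {}}
emit_p = {s: {'A': math.log(0.5), 'B': math.log(0.5)} for s in trans_p}
prob, route = viterbi(obs, states, start_p, trans_p, emit_p)
print(route)  # [('B', 'n'), ('E', 'n')]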
|
42f3037042114c0b4e56053ac6dfc6bd77423d39
| 3,646,233
|
def add_merge_variants_arguments(parser):
"""
    Add arguments to a parser for the merge-variants sub-command
    :param parser: argparse object
:return:
"""
parser.add_argument(
"-vp",
"--vcf_pepper",
type=str,
required=True,
help="Path to VCF file from PEPPER SNP."
)
parser.add_argument(
"-vd",
"--vcf_deepvariant",
type=str,
required=True,
help="Path to VCF file from DeepVariant."
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
required=True,
help="Path to output directory."
)
return parser
|
5fd6bc936ba1d17ea86a49499e1f6b816fb0a389
| 3,646,234
|
import graph_scheduler
import types
def set_time_scale_alias(name: str, target: TimeScale):
"""Sets an alias named **name** of TimeScale **target**
Args:
name (str): name of the alias
target (TimeScale): TimeScale that **name** will refer to
"""
name_aliased_time_scales = list(filter(
lambda e: _time_scale_aliases[e] == name,
_time_scale_aliases
))
if len(name_aliased_time_scales) > 0:
raise ValueError(f"'{name}' is already aliased to {name_aliased_time_scales[0]}")
try:
target = getattr(TimeScale, target)
except TypeError:
pass
except AttributeError as e:
raise ValueError(f'Invalid TimeScale {target}') from e
_time_scale_aliases[target] = name
setattr(TimeScale, name, target)
def getter(self):
return getattr(self, _time_scale_to_attr_str(target))
def setter(self, value):
setattr(self, _time_scale_to_attr_str(target), value)
prop = property(getter).setter(setter)
setattr(Time, name.lower(), prop)
setattr(SimpleTime, name.lower(), prop)
# alias name in style of a class name
new_class_segment_name = _time_scale_to_class_str(name)
for cls_name, cls in graph_scheduler.__dict__.copy().items():
# make aliases of conditions that contain a TimeScale name (e.g. AtEnvironmentStateUpdate)
target_class_segment_name = _time_scale_to_class_str(target)
if isinstance(cls, (type, types.ModuleType)):
if isinstance(cls, types.ModuleType):
try:
if _alias_docs_warning_str not in cls.__doc__:
cls.__doc__ = f'{_alias_docs_warning_str}{cls.__doc__}'
except TypeError:
pass
_multi_substitute_docstring(
cls,
{
target.name: name,
target_class_segment_name: new_class_segment_name,
}
)
if target_class_segment_name in cls_name:
new_cls_name = cls_name.replace(
target_class_segment_name,
new_class_segment_name
)
setattr(graph_scheduler.condition, new_cls_name, cls)
setattr(graph_scheduler, new_cls_name, cls)
graph_scheduler.condition.__all__.append(new_cls_name)
graph_scheduler.__all__.append(new_cls_name)
|
889f2b70735a11ce8330e58b7294d3d115334d5f
| 3,646,235
|
def find_bounding_boxes(img):
"""
Find bounding boxes for blobs in the picture
:param img - numpy array 1xWxH, values 0 to 1
:return: bounding boxes of blobs [x0, y0, x1, y1]
"""
img = util.torch_to_cv(img)
img = np.round(img)
img = img.astype(np.uint8)
contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # params copied from tutorial
bounding_boxes = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
bounding_boxes.append([x, y, x + w, y + h])
return bounding_boxes
|
7b7de4d163b18b099721c39b69721e477c473c16
| 3,646,236
|
from typing import Optional
from datetime import datetime, date
from itertools import cycle, islice
def resolve_day(day: str, next_week: Optional[bool] = False) -> int:
"""Resolves day to index value."""
week = ['monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday']
today = datetime.now()
today_idx = date.weekday(today)
day_idx = week.index(day)
temp_list = list(islice(cycle(week), today_idx, 2 * today_idx + day_idx))
if next_week:
return len(temp_list) - 1
else:
return temp_list.index(day)
|
d09aba564f0293ac8b92699427199998bf7e869f
| 3,646,237
|
import logging
import pandas as pd
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
def get_gsheet_data():
"""
    Gets all of the data in the specified Google Sheet.
"""
# Get Credentials from JSON
logging.info('Attempting to read values to Google Sheet.')
creds = ServiceAccountCredentials.from_json_keyfile_name('TrackCompounds-1306f02bc0b1.json', SCOPES)
logging.info('Authorizing Google API credentials.')
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
range=READ_RANGE).execute()
data = result.get('values')
# Turn data into a DataFrame
df = pd.DataFrame(data[1:], columns=data[0])
logging.info('Successfully read G-Sheet data into a DataFrame.')
return df
|
872671a1bc9b17fec5e6db3fb4a7172567da4eff
| 3,646,239
|
def name_tensor(keras_tensor, name):
"""
Add a layer with this ``name`` that does nothing.
Usefull to mark a tensor.
"""
return Activation('linear', name=name)(keras_tensor)
|
9ac83e8974efa2e48ab14d150a5da47ac7c23fb5
| 3,646,240
|
import operator
def pluck(ind, seqs, default=no_default):
""" plucks an element or several elements from each item in a sequence.
``pluck`` maps ``itertoolz.get`` over a sequence and returns one or more
elements of each item in the sequence.
This is equivalent to running `map(curried.get(ind), seqs)`
``ind`` can be either a single string/index or a sequence of
strings/indices.
``seqs`` should be sequence containing sequences or dicts.
e.g.
>>> data = [{'id': 1, 'name': 'Cheese'}, {'id': 2, 'name': 'Pies'}]
>>> list(pluck('name', data))
['Cheese', 'Pies']
>>> list(pluck([0, 1], [[1, 2, 3], [4, 5, 7]]))
[(1, 2), (4, 5)]
See Also:
get
map
"""
if default is no_default:
if isinstance(ind, list):
return map(operator.itemgetter(*ind), seqs)
return map(operator.itemgetter(ind), seqs)
elif isinstance(ind, list):
return (tuple(_get(item, seq, default) for item in ind)
for seq in seqs)
return (_get(ind, seq, default) for seq in seqs)
|
9bb31f94115eec0ba231c3c2bf9c067a52efca52
| 3,646,241
|
def shape_metrics(model):
"""""
Calculates three different shape metrics of the current graph of the model.
Shape metrics: 1. Density 2. Variance of nodal degree 3. Centrality
The calculations are mainly based on the degree statistics of the current
graph
For more information one is referred to the article 'Geographical
influences of an emerging network of gang rivalries'
(Rachel A. Hegemann et al., 2011)
Input:
model = Model object
Output:
Tuple containing the three shape metrics in the order described above.
"""
# Determine total degree, average degree, max degree and density graph
degrees = [degree[1] for degree in model.gr.degree]
total_degree = sum(degrees)
ave_degree = total_degree / model.config.total_gangs
max_degree = max(degrees)
graph_density = nx.density(model.gr)
# Determine variance of nodal degree and centrality
variance_degree, centrality = 0, 0
for degree in degrees:
variance_degree += ((degree - ave_degree) * (degree - ave_degree))
centrality += max_degree - degree
    # Normalize variance of nodal degree and centrality
variance_degree /= model.config.total_gangs
centrality /= ((model.config.total_gangs - 1) *
(model.config.total_gangs - 2))
    # Returns a tuple containing the three statistics
return graph_density, variance_degree, centrality
|
003e72145ade222a7ec995eae77f6028527e8ba9
| 3,646,242
|
def calculate_actual_sensitivity_to_removal(jac, weights, moments_cov, params_cov):
"""calculate the actual sensitivity to removal.
The sensitivity measure is calculated for each parameter wrt each moment.
It answers the following question: How much precision would be lost if the kth
moment was excluded from the estimation if "weights" is used as weighting
matrix?
Args:
        jac (np.ndarray or pandas.DataFrame): The Jacobian of the simulated moments
            with respect to the parameters, evaluated at the point estimates.
weights (np.ndarray or pandas.DataFrame): The weighting matrix used for
msm estimation.
moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
empirical moments.
params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the
parameter estimates.
Returns:
np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments)
"""
m4 = []
_jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov
)
for k in range(len(_weights)):
weight_tilde_k = np.copy(_weights)
weight_tilde_k[k, :] = 0
weight_tilde_k[:, k] = 0
sigma_tilde_k = cov_robust(_jac, weight_tilde_k, _moments_cov)
m4k = sigma_tilde_k - _params_cov
m4k = m4k.diagonal()
m4.append(m4k)
m4 = np.array(m4).T
params_variances = np.diagonal(_params_cov)
e4 = m4 / params_variances.reshape(-1, 1)
if names:
e4 = pd.DataFrame(e4, index=names.get("params"), columns=names.get("moments"))
return e4
|
30f51ecc2c53126b6e46f5301bef857d104381cf
| 3,646,243
|