| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
import json
def app_durations():
"""Generate JavaScript for appDurations."""
return 'appDurations = ' + json.dumps(supported_durations)
|
3be9ecc801cd650a5cd1a3c4db1c50957ccfa1c0
| 3,648,229
|
def generic_cc(mag=10,dmag=8,band='K'):
"""Returns a generic contrast curve.
Keyword arguments:
mag -- magnitude of target star in passband
dmag -- can currently be either 8 or 4.5 (two example generic cc's being used)
band -- passband of observation.
"""
if dmag==8:
return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex8_K.txt' % KEPLERDIR,band,mag)
elif dmag==4.5:
return fpp.ContrastCurveFromFile('%s/data/contrast_curves/ex4.5_K.txt' % KEPLERDIR,band,mag)
|
04ca148c2a5b8d9eb2d0c60a0d6ad8e177901c5f
| 3,648,230
|
from typing import Any
def read_routes(*, db: Session = Depends(deps.get_db),data_in: schemas.DictDataCreate,current_user: models.User = Depends(deps.get_current_active_user)) -> Any:
"""
Create Mock Data.
"""
db.add(models.Dict_Data(**jsonable_encoder(data_in)))
return {
"code": 20000,
"data": "",
"message":"修改成功",
}
|
09e575a8262a0818c7904e4e077d86f492f3407e
| 3,648,231
|
def get_companies_pagination_from_lagou(city_id=0, finance_stage_id=0, industry_id=0, page_no=1):
"""
Crawl paginated company data from Lagou
:param city_id: city id
:param finance_stage_id: financing stage id
:param industry_id: industry id
:param page_no: page number
:return: paginated Lagou company data
:rtype: utils.pagination.Pagination
"""
url = constants.COMPANIES_URL.format(city_id=city_id,
finance_stage_id=finance_stage_id,
industry_id=industry_id)
params = {'pn': page_no, 'sortField': constants.SORTED_BY_JOBS_COUNT}
response_json = utils.http_tools.requests_get(url=url, params=params).json()
pagination = utils.pagination.Pagination(per_page=int(response_json['pageSize']),
total=int(response_json['totalCount']))
return pagination
|
7a82e0dd7ad8ab960dbabb749e68867607b70878
| 3,648,232
|
def is_quant_contam(contam_model):
"""Get the flag for quantitative contamination"""
# the list of quantitative models
quant_models = ['GAUSS', 'FLUXCUBE']
# set the default value
isquantcont = True
# check whether the contamination is not quantitative
if not contam_model.upper() in quant_models:
# re-set the flag
isquantcont = False
# return the flag
return isquantcont
|
8a88609857ac8eb61bfddfa8d8227ffa237d2641
| 3,648,233
|
def nms_wrapper(scores, boxes, threshold = 0.7, class_sets = None):
"""
post-process the results of im_detect
:param scores: N * K numpy
:param boxes: N * (K * 4) numpy
:param class_sets: e.g. CLASSES = ('__background__','person','bike','motorbike','car','bus')
:return: a list of K-1 dicts, no background, each is {'class': classname, 'dets': None | [[x1,y1,x2,y2,score],...]}
"""
num_class = scores.shape[1] if class_sets is None else len(class_sets)
assert num_class * 4 == boxes.shape[1],\
'Detection scores and boxes do not match'
class_sets = ['class_' + str(i) for i in range(0, num_class)] if class_sets is None else class_sets
res = []
for ind, cls in enumerate(class_sets[1:]):
ind += 1 # skip background
cls_boxes = boxes[:, 4*ind : 4*(ind+1)]
cls_scores = scores[:, ind]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, thresh=0.3)
dets = dets[keep, :]
dets = dets[np.where(dets[:, 4] > threshold)]
r = {}
if dets.shape[0] > 0:
r['class'], r['dets'] = cls, dets
else:
r['class'], r['dets'] = cls, None
res.append(r)
return res
|
7f6a260811a1c20da40e41cc179488440bfc5164
| 3,648,234
|
def Rbf(
gamma: float = 1.0) -> InternalLayer:
"""Dual activation function for normalized RBF or squared exponential kernel.
Dual activation function is `f(x) = sqrt(2)*sin(sqrt(2*gamma) x + pi/4)`.
NNGP kernel transformation correspond to (with input dimension `d`)
`k = exp(- gamma / d * ||x - x'||^2) = exp(- gamma*(q11 + q22 - 2 * q12))`.
Args:
gamma:
related to characteristic length-scale (l) that controls width of the
kernel, where `gamma = 1 / (2 l^2)`.
Returns:
`(init_fn, apply_fn, kernel_fn)`.
"""
def fn(x):
return np.sqrt(2) * np.sin(np.sqrt(2 * gamma) * x + np.pi/4)
@_requires(diagonal_spatial=_Diagonal()) # pytype:disable=wrong-keyword-args
def kernel_fn(k: Kernel) -> Kernel:
"""Compute new kernels after an `Rbf` layer."""
cov1, nngp, cov2, ntk = k.cov1, k.nngp, k.cov2, k.ntk
sum11, sum12, sum22 = _get_diagonal_outer_prods(cov1,
cov2,
k.diagonal_batch,
k.diagonal_spatial,
op.add)
def nngp_ntk_fn(nngp, sum_, ntk):
nngp = np.exp(gamma * (-sum_ + 2 * nngp))
if ntk is not None:
ntk *= 2 * gamma * nngp
return nngp, ntk
def nngp_fn_diag(nngp):
return np.ones_like(nngp)
nngp, ntk = nngp_ntk_fn(nngp, sum12, ntk)
if k.diagonal_batch and k.diagonal_spatial:
cov1 = nngp_fn_diag(cov1)
if cov2 is not None:
cov2 = nngp_fn_diag(cov2)
else:
cov1, _ = nngp_ntk_fn(cov1, sum11, None)
if cov2 is not None:
cov2, _ = nngp_ntk_fn(cov2, sum22, None)
return k.replace(cov1=cov1, nngp=nngp, cov2=cov2, ntk=ntk)
return _elementwise(fn, f'Rbf({gamma})', kernel_fn)
|
c7c44f6227d0d337da40a1afe8eff359ccaebbf5
| 3,648,235
|
def create_returns_tear_sheet(returns, positions=None,
transactions=None,
live_start_date=None,
cone_std=(1.0, 1.5, 2.0),
benchmark_rets=None,
bootstrap=False,
turnover_denom='AGB',
header_rows=None,
return_fig=False):
"""
Generate a number of plots for analyzing a strategy's returns.
- Fetches benchmarks, then creates the plots on a single figure.
- Plots: rolling returns (with cone), rolling beta, rolling sharpe,
rolling Fama-French risk factors, drawdowns, underwater plot, monthly
and annual return plots, daily similarity plots,
and return quantile box plot.
- Will also print the start and end dates of the strategy,
performance statistics, drawdown periods, and the return range.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- See full explanation in create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
benchmark_rets : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
bootstrap : boolean, optional
Whether to perform bootstrap analysis for the performance
metrics. Takes a few minutes longer.
turnover_denom : str, optional
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
if benchmark_rets is not None:
returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)
plotting.show_perf_stats(returns, benchmark_rets,
positions=positions,
transactions=transactions,
turnover_denom=turnover_denom,
bootstrap=bootstrap,
live_start_date=live_start_date,
header_rows=header_rows)
plotting.show_worst_drawdown_periods(returns)
vertical_sections = 11
if live_start_date is not None:
vertical_sections += 1
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
if benchmark_rets is not None:
vertical_sections += 1
if bootstrap:
vertical_sections += 1
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_rolling_returns = plt.subplot(gs[:2, :])
i = 2
ax_rolling_returns_vol_match = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_rolling_returns_log = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_returns = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
if benchmark_rets is not None:
ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_monthly_heatmap = plt.subplot(gs[i, 0])
ax_annual_returns = plt.subplot(gs[i, 1])
ax_monthly_dist = plt.subplot(gs[i, 2])
i += 1
ax_return_quantiles = plt.subplot(gs[i, :])
i += 1
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns)
ax_rolling_returns.set_title(
'Cumulative returns')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=None,
volatility_match=(benchmark_rets is not None),
legend_loc=None,
ax=ax_rolling_returns_vol_match)
ax_rolling_returns_vol_match.set_title(
'Cumulative returns volatility matched to benchmark')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
logy=True,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns_log)
ax_rolling_returns_log.set_title(
'Cumulative returns on logarithmic scale')
plotting.plot_returns(
returns,
live_start_date=live_start_date,
ax=ax_returns,
)
ax_returns.set_title(
'Returns')
if benchmark_rets is not None:
plotting.plot_rolling_beta(
returns, benchmark_rets, ax=ax_rolling_beta)
plotting.plot_rolling_volatility(
returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)
plotting.plot_rolling_sharpe(
returns, ax=ax_rolling_sharpe)
# Drawdowns
plotting.plot_drawdown_periods(
returns, top=5, ax=ax_drawdown)
plotting.plot_drawdown_underwater(
returns=returns, ax=ax_underwater)
plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
plotting.plot_annual_returns(returns, ax=ax_annual_returns)
plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)
plotting.plot_return_quantiles(
returns,
live_start_date=live_start_date,
ax=ax_return_quantiles)
if bootstrap and (benchmark_rets is not None):
ax_bootstrap = plt.subplot(gs[i, :])
plotting.plot_perf_stats(returns, benchmark_rets,
ax=ax_bootstrap)
elif bootstrap:
raise ValueError('bootstrap requires passing of benchmark_rets.')
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
|
5d01bb52ed3bd642ed8c7743ee41b4d57f732c2f
| 3,648,237
|
def vectorize_text(text_col: pd.Series,
vec_type: str = 'count',
**kwargs):
"""
Vectorizes pre-processed text. Instantiates the vectorizer and
fit_transform it to the data provided.
:param text_col: Pandas series, containing preprocessed text.
:param vec_type: string indicating what type of vectorization
(count or tfidf currently).
:param **kwargs: dict of keyworded arguments for sklearn vectorizer
functions.
:return: A tuple containing vectorized (doc-feature matrix that has d rows
and f columns for count and tfidf vectorization) and vectorizer_obj
(vectorization sklearn object representing trained vectorizer).
"""
# Check if vectorization type is supported
assert vec_type in ['count', 'tfidf']
# Get raw values from pandas series
text_raw = text_col.tolist()
# Lets the vectorizer know the input has already been pre-tokenized
# and is now delimited by whitespaces
kwargs['analyzer'] = str.split
# Apply proper vectorization
if vec_type == 'count':
count_vec = CountVectorizer(**kwargs)
vectorized = count_vec.fit_transform(text_raw)
vectorizer_obj = count_vec
elif vec_type == 'tfidf':
tfidf_vec = TfidfVectorizer(**kwargs)
vectorized = tfidf_vec.fit_transform(text_raw)
vectorizer_obj = tfidf_vec
# Return vectorized object
return vectorized, vectorizer_obj
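# Hedged usage sketch for vectorize_text. It assumes the module already imports
# CountVectorizer and TfidfVectorizer from sklearn.feature_extraction.text, as the
# function body implies; pandas is imported here only for the example data.
import pandas as pd
example_texts = pd.Series(["good movie", "bad movie", "good plot"])  # pre-tokenized, space-delimited
example_matrix, example_vectorizer = vectorize_text(example_texts, vec_type="count")
# example_matrix is a 3 x 4 sparse document-term matrix (tokens: bad, good, movie, plot)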
|
fd1b720c5eee83d788684a49f5fe7ad26e955016
| 3,648,238
|
def creation_LS(X,y,N):
"""Generates a random learning set of size N from the data in X
(containing the input samples) and in y (containing the corresponding
output values).
Parameters
----------
X: array containing the input samples
y: array containing the corresponding output values
Return
------
X_random_rows : array of shape [N, (number of columns of X)]
y_random_rows : array of shape [N]
"""
number_of_rows = X.shape[0]
random_indices = np.random.choice(number_of_rows, size=N, replace=False)
X_random_rows = X[random_indices, :]
y_random_rows= y[random_indices]
return X_random_rows, y_random_rows
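# Hedged usage sketch for creation_LS, assuming numpy is imported as np in the module.
import numpy as np
X_demo = np.arange(20).reshape(10, 2)   # 10 input samples with 2 features each
y_demo = np.arange(10)                  # matching output values
X_ls, y_ls = creation_LS(X_demo, y_demo, N=4)
# X_ls has shape (4, 2) and y_ls has shape (4,); rows are drawn without replacement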
|
fdcf5fe96082a75b096b43747f940b8cf46f326b
| 3,648,239
|
def print_summary(show="all",
blocks=False, cid=True, blobs=True, size=True,
typ=False, ch=False, ch_online=True,
name=True, title=False, path=False,
sanitize=False,
start=1, end=0, channel=None, invalid=False,
reverse=False,
file=None, fdate=False, sep=";",
server="http://localhost:5279"):
"""Print a summary of the items downloaded from the LBRY network.
Parameters
----------
show: str, optional
It defaults to `'all'`, in which case it shows all items.
If it is `'incomplete'` it will show claims that are missing blobs.
If it is `'full'` it will show claims that have all blobs.
If it is `'media'` it will show claims that have the media file
(mp4, mp3, mkv, etc.).
Normally only items that have all blobs also have a media file;
however, if the claim is currently being downloaded
a partial media file may be present.
If it is `'missing'` it will show claims that don't have
the media file, whether the full blobs are present or not.
blocks: bool, optional
It defaults to `False`, in which case it won't print
the `height` block of the claims.
If it is `True` it will print this value, which gives some idea
of when the claim was registered in the blockchain.
cid: bool, optional
It defaults to `True`.
Show the `'claim_id'` of the claim.
It is a 40 character alphanumeric string.
blobs: bool, optional
It defaults to `True`.
Show the number of blobs in the file, and how many are complete.
size: bool, optional
It defaults to `True`.
Show the length of the stream in minutes and seconds, like `14:12`,
when possible (audio and video), and also the size in mebibytes (MB).
typ: bool, optional
It defaults to `False`.
Show the type of claim (video, audio, document, etc.)
ch: bool, optional
It defaults to `False`.
Show the name of the channel that published the claim.
This is slow if `ch_online=True`.
ch_online: bool, optional
It defaults to `True`, in which case it searches for the channel name
by doing a reverse search of the item online. This makes the search
slow.
By setting it to `False` it will consider the channel name
stored in the input dictionary itself, which will be faster
but it won't be the full name of the channel. If no channel is found
offline, then it will set a default value `'_None_'` just so
it can be printed with no error.
This parameter only has effect if `ch=True`, or if `channel`
is used, as it internally sets `ch=True`.
name: bool, optional
It defaults to `True`.
Show the name of the claim.
title: bool, optional
It defaults to `False`.
Show the title of the claim.
path: bool, optional
It defaults to `False`.
Show the full path of the saved media file.
sanitize: bool, optional
It defaults to `False`, in which case it will not remove the emojis
from the name of the claim and channel.
If it is `True` it will remove these unicode characters.
This option requires the `emoji` package to be installed.
start: int, optional
It defaults to 1.
Show claims starting from this index in the list of items.
end: int, optional
It defaults to 0.
Show claims until and including this index in the list of items.
If it is 0, it is the same as the last index in the list.
channel: str, optional
It defaults to `None`.
It must be a channel's name, in which case it shows
only the claims published by this channel.
Using this parameter sets `ch=True`.
invalid: bool, optional
It defaults to `False`, in which case it prints every single claim
previously downloaded.
If it is `True` it will only print those claims that are 'invalid',
that is, those that cannot be resolved anymore from the online
database. This probably means that the author decided to remove
the claims at some point after they were downloaded originally.
This can be verified with the blockchain explorer, by following
the claim ID for an 'unspent' transaction.
Using this parameter sets `ch_online=False` as the channel name
of invalid claims cannot be resolved online, only from the offline
database.
reverse: bool, optional
It defaults to `False`, in which case older items come first
in the output list.
If it is `True` newer claims are at the beginning of the list.
file: str, optional
It defaults to `None`.
It must be a writable path to which the summary will be written.
Otherwise the summary will be printed to the terminal.
fdate: bool, optional
It defaults to `False`.
If it is `True` it will add the date to the name of the summary file.
sep: str, optional
It defaults to `;`. It is the separator character between
the data fields in the printed summary. Since the claim name
can have commas, a semicolon `;` is used by default.
server: str, optional
It defaults to `'http://localhost:5279'`.
This is the address of the `lbrynet` daemon, which should be running
in your computer before using any `lbrynet` command.
Normally, there is no need to change this parameter from its default
value.
Returns
-------
bool
It returns `True` if it printed the summary successfully.
If there is any error it will return `False`.
"""
if not funcs.server_exists(server=server):
return False
output = sort.sort_items_size(reverse=False, invalid=invalid,
server=server)
items = output["claims"]
if not items or len(items) < 1:
if file:
print("No file written.")
return False
if invalid:
ch_online = False
print()
status = prnt.print_items(items=items, show=show,
blocks=blocks, cid=cid, blobs=blobs,
size=size,
typ=typ, ch=ch, ch_online=ch_online,
name=name, title=title, path=path,
sanitize=sanitize,
start=start, end=end, channel=channel,
reverse=reverse,
file=file, fdate=fdate, sep=sep,
server=server)
return status
|
6888917bd6a944c6e91c0d9796383b279db68315
| 3,648,240
|
import logging
def load_embeddings(path):
"""
Load embeddings from file and put into dict.
:param path: path to embeddings file
:return: a map word->embedding
"""
logging.info('Loading embeddings...')
embeddings = dict()
with open(path, 'r') as f:
for line in f:
line = line.split(' ')
embeddings[line[0]] = np.array([float(a) for a in line[1:]])
return embeddings
|
3e7e05cc9131dfb9d06c4c220d5e13d6965180b7
| 3,648,243
|
def format_component_descriptor(name, version):
"""
Return a properly formatted component 'descriptor' in the format
<name>-<version>
"""
return '{0}-{1}'.format(name, version)
|
2edb92f20179ae587614cc3c9ca8198c9a4c240e
| 3,648,245
|
import sqlite3
def dbconn():
"""
Initializing db connection
"""
sqlite_db_file = '/tmp/test_qbo.db'
return sqlite3.connect(sqlite_db_file, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
|
b0c6dd235490cee93ada20f060d681a319b120f0
| 3,648,246
|
import hashlib
def md5(fname):
"""
Compute the md5 of a file in chunks.
Avoid running out of memory when hashing large files.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
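# Usage sketch for md5 on a small temporary file (the path below is created only for this example).
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    tmp_path = tmp.name
md5(tmp_path)  # '5d41402abc4b2a76b9719d911017c592'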
|
9e0bfbd625df6a46d5bff4cd0e9f065d1eaf8a4b
| 3,648,247
|
def get_r(x, y, x1, y1):
"""
Get r vector following Xu et al. (2006) Eq. 4.2
x, y = arrays; x1, y1 = single points; or vice-versa
"""
return ((x-x1)**2 + (y-y1)**2)**0.5
|
424408f86e6e3301ee6eca72e2da7da5bf1f8140
| 3,648,248
|
import re
def replace_empty_bracket(tokens):
"""
Remove empty bracket
:param tokens: List of tokens
:return: Fixed sequence
"""
merged = "".join(tokens)
find = re.search(r"\{\}", merged)
while find:
merged = re.sub(r"\{\}", "", merged)
find = re.search(r"\{\}", merged)
return list(merged)
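# Usage sketch: empty "{}" pairs are stripped and the cleaned character list is returned.
replace_empty_bracket(list("x^{}{2}"))  # ['x', '^', '{', '2', '}']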
|
fd2c9f2f1c2e199056e89dbdba65f92e4d5834eb
| 3,648,249
|
def presentation():
"""
This route is the final project and will be test
of all previously learned skills.
"""
return render_template("")
|
c592ae4b28c5c9b89592c3842e73c0e76cc4bd66
| 3,648,250
|
def extra_credit(grades,students,bonus):
"""
Returns a copy of grades with extra credit assigned
The dictionary returned adds a bonus to the grade of
every student whose netid is in the list students.
Parameter grades: The dictionary of student grades
Precondition: grades has netids as keys, ints as values.
Parameter students: The list of students to give extra credit
Precondition: students is a list of valid (string) netids
Parameter bonus: The extra credit bonus to award
Precondition: bonus is an int
"""
# DICTIONARY COMPREHENSION
#return { k:(grades[k]+bonus if k in students else grades[k]) for k in grades }
# ACCUMULATOR PATTERN
result = {}
for k in grades:
if k in students:
result[k] = grades[k]+bonus
else:
result[k] = grades[k]
return result
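# Usage sketch for extra_credit with a tiny grade book.
grades_demo = {'jd123': 80, 'ab456': 90, 'xy789': 70}
extra_credit(grades_demo, ['jd123', 'xy789'], 5)
# {'jd123': 85, 'ab456': 90, 'xy789': 75}; grades_demo itself is left unchanged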
|
334a9edb3d1d045832009e20c6cba7f24e5c181d
| 3,648,251
|
def get_geo_signal_combos(data_source):
"""
Get list of geo type-signal type combinations that we expect to see.
Cross references based on combinations reported available by COVIDcast metadata.
"""
meta = covidcast.metadata()
source_meta = meta[meta['data_source'] == data_source]
# Need to convert np.records to tuples so they are hashable and can be used in sets and dicts.
geo_signal_combos = list(map(tuple,
source_meta[["geo_type", "signal"]].to_records(index=False)))
print("Number of expected geo region-signal combinations:",
len(geo_signal_combos))
return geo_signal_combos
|
90d030372b3e7d9ed2de0d53b6aa42fdf3723355
| 3,648,252
|
def absolute_(x, track_types = True, **kwargs):
"""Compute the absolute value of x.
Parameters
----------
x : :obj:`xarray.DataArray`
Data cube containing the values to apply the operator to.
track_types : :obj:`bool`
Should the operator promote the value type of the output object, based
on the value type of the input object?
**kwargs:
Ignored.
Returns
-------
:obj:`xarray.DataArray`
A data cube with the same shape as ``x`` containing the results of all
evaluated expressions.
Note
-----
When tracking value types, this operator uses the following type promotion
manual, with the keys being the supported value types of ``x``, and the
corresponding value being the promoted value type of the output.
.. exec_code::
:hide_code:
from semantique.processor.types import TYPE_PROMOTION_MANUALS
obj = TYPE_PROMOTION_MANUALS["absolute"]
obj.pop("__preserve_labels__")
print(obj)
"""
if track_types:
promoter = TypePromoter(x, function = "absolute")
promoter.check()
f = lambda x: np.absolute(x)
out = xr.apply_ufunc(f, x)
if track_types:
out = promoter.promote(out)
return out
|
b88c6662890832b0d54f752e59c97c9a9ca9ceb5
| 3,648,253
|
def any_input(sys_, t, input_signal=0, init_cond=None, *, plot=True):
"""
Accept any input signal, then calculate the response of the system.
:param sys_: the system
:type sys_: TransferFunction | StateSpace
:param t: time
:type t: array_like
:param input_signal: input signal accepted by the system
:type input_signal: numbers.Real | np.ndarray
:param init_cond: initial condition of the system
:type init_cond: None | numbers.Real | np.ndarray
:param plot: If plot is True, it will show the response curve.
:type plot: bool
:return: system output and time array
:rtype: tuple[np.ndarray, np.ndarray]
"""
if isinstance(sys_, TransferFunction):
sys_ = tf2ss(sys_)
u = _setup_input_signal(input_signal, t, sys_.inputs)
y, t = _any_input(sys_, t, u, init_cond)
if plot:
plot_response_curve(y, t, "response", sys_.is_ctime)
return y, t
|
fc6d72b5c22d585a4ba7c5be895b1266e72e70dd
| 3,648,254
|
from .mnext import mnext
def mnext_mbv2_cfg(pretrained=False,in_chans=3,drop_rate=0.2,drop_connect_rate=0.5,bn_tf=False,bn_momentum=0.9,bn_eps=0.001, global_pool=False, **kwargs):
"""Creates a MNeXt Large model. Tensorflow compatible variant
"""
model = mnext(**kwargs)
return model
|
5f8fcdcaa6abf4047b4fc06ea7dcb92f6fbeade7
| 3,648,256
|
def _embeddings_from_arguments(column,
args,
weight_collections,
trainable,
output_rank=2):
"""Returns embeddings for a column based on the computed arguments.
Args:
column: the column name.
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
the embeddings.
Raises:
ValueError: if not possible to create.
"""
# pylint: disable=protected-access
input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
weight_tensor = None
if args.weight_tensor is not None:
weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
# pylint: enable=protected-access
# This option is only enabled for scattered_embedding_column.
if args.hash_key:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
return embedding_ops.scattered_embedding_lookup_sparse(
embeddings,
input_tensor,
args.dimension,
hash_key=args.hash_key,
combiner=args.combiner,
name="lookup")
if args.shared_embedding_name is not None:
shared_embedding_collection_name = (
"SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper())
graph = ops.get_default_graph()
shared_embedding_collection = (
graph.get_collection_ref(shared_embedding_collection_name))
shape = [args.vocab_size, args.dimension]
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
"Collection %s can only contain one "
"(partitioned) variable." % shared_embedding_collection_name)
else:
embeddings = shared_embedding_collection[0]
if embeddings.get_shape() != shape:
raise ValueError(
"The embedding variable with name {} already "
"exists, but its shape does not match required "
"embedding shape here. Please make sure to use "
"different shared_embedding_name for different "
"shared embeddings.".format(args.shared_embedding_name))
else:
embeddings = contrib_variables.model_variable(
name=args.shared_embedding_name,
shape=shape,
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
graph.add_to_collection(shared_embedding_collection_name, embeddings)
else:
embeddings = contrib_variables.model_variable(
name="weights",
shape=[args.vocab_size, args.dimension],
dtype=dtypes.float32,
initializer=args.initializer,
trainable=(trainable and args.trainable),
collections=weight_collections)
if _is_variable(embeddings):
embeddings = [embeddings]
else:
embeddings = embeddings._get_variable_list() # pylint: disable=protected-access
# pylint: disable=protected-access
_maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
return embedding_ops.safe_embedding_lookup_sparse(
embeddings,
input_tensor,
sparse_weights=weight_tensor,
combiner=args.combiner,
name=column.name + "weights",
max_norm=args.max_norm)
|
30c305bfbf20d48af48dd25aa32c1648f8f95fce
| 3,648,257
|
def stuw_laagstedoorstroombreedte(damo_gdf=None, obj=None, damo_doorstroombreedte="DOORSTROOMBREEDTE",
damo_kruinvorm="WS_KRUINVORM"):
"""
If LAAGSTEDOORSTROOMHOOGTE is NULL and WS_KRUINVORM = 3 (rectangle), then LAAGSTEDOORSTROOMBREEDTE = DOORSTROOMBREEDTE.
"""
return damo_gdf.apply(
lambda x: _stuw_get_laagstedoorstroombreedte_rechthoek(x[damo_kruinvorm], x[damo_doorstroombreedte]), axis=1)
|
534d917326222ef77fc0a8022ed84ea08bb0be0a
| 3,648,258
|
def manage_categories():
"""
Display all categories to manage categories page (admin only)
"""
# Denied user access to manage_categories page
if session["user"] != "admin":
return redirect(url_for('error', code=403))
# query for all categories from categories collection
manage_categories = list(mongo.db.categories.find().sort(
"category_name", 1))
# get the categories that are in use for navigation menu
nav_categories = mongo.db.recipes.distinct("category_name")
# call the paginated function to display only the
# specific number of categories per page
paginated_categories = paginated(manage_categories)
# get the page pagination
pagination = get_pagination(manage_categories)
# total number of categories found
total = len(manage_categories)
# set up the page_set object
page_set = {
"title": "Manage Categories",
"type": "form"
}
return render_template("pages/manage_categories.html",
page_set=page_set,
nav_categories=nav_categories,
manage_categories=paginated_categories,
pagination=pagination,
total=total)
|
5002375f904240f2274aa8040b426da8515122a7
| 3,648,259
|
def callback(id):
"""
Get the specified record
"""
# Check user permissions
_common_logic.check_user_power()
_positions_logic = positions_logic.PositionsLogic()
# 读取记录
result = _positions_logic.get_model_for_cache(id)
if result:
# Output JSON directly
return web_helper.return_msg(0, 'Success', result)
else:
return web_helper.return_msg(-1, "Query failed")
|
3451cc1ebb18004f46847f6538c751afd86bdf74
| 3,648,260
|
def sort_cluster(x: list, t: np.ndarray) -> list:
"""
sort x according to t
:param x:
:param t:
:return:
"""
return [x[i] for i in np.argsort(t)]
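# Usage sketch for sort_cluster, assuming numpy is imported as np in the module.
import numpy as np
sort_cluster(['b', 'c', 'a'], np.array([2.0, 3.0, 1.0]))  # ['a', 'b', 'c']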
|
a2bcd57bb9c402aa19f12483e792f1e4379c4481
| 3,648,262
|
def gettof(*args):
"""gettof(flags_t F) -> ushort"""
return _idaapi.gettof(*args)
|
d377fc28b7515a45112083fc38c722b82caee0b9
| 3,648,263
|
def generate(temp):
"""
Wrapper that checks generated names against the base street names to avoid a direct
regurgitation of input data.
returns list
"""
is_in_dict = True
while is_in_dict:
result = textgen.generate(temperature=temp, return_as_list=True)
joined = ' '.join(result)
is_in_dict = basenames.get(joined, False)
return result
|
2bfc6d366d0543d6ada762539c0c6cb301d729a8
| 3,648,264
|
import re
def __create_pyramid_features(backbone_dict,
ndim=2,
feature_size=256,
include_final_layers=True,
lite=False,
upsample_type='upsamplelike',
interpolation='bilinear',
z_axis_convolutions=False):
"""Creates the FPN layers on top of the backbone features.
Args:
backbone_dict (dictionary): A dictionary of the backbone layers, with
the names as keys, e.g. ``{'C0': C0, 'C1': C1, 'C2': C2, ...}``
feature_size (int): The feature size to use for
the resulting feature levels.
include_final_layers (bool): Add two coarser pyramid levels
ndim (int): The spatial dimensions of the input data.
Must be either 2 or 3.
lite (bool): Whether to use depthwise conv instead of regular conv for
feature pyramid construction
upsample_type (str): Choice of upsampling methods
from ``['upsamplelike','upsamling2d','upsampling3d']``.
interpolation (str): Choice of interpolation mode for upsampling
layers from ``['bilinear', 'nearest']``.
Returns:
dict: The feature pyramid names and levels,
e.g. ``{'P3': P3, 'P4': P4, ...}``
Each backbone layer gets a pyramid level, and two additional levels
are added, e.g. ``[C3, C4, C5]`` --> ``[P3, P4, P5, P6, P7]``
Raises:
ValueError: ``ndim`` is not 2 or 3
ValueError: ``upsample_type`` not in
``['upsamplelike','upsampling2d', 'upsampling3d']``
"""
# Check input to ndims
acceptable_ndims = [2, 3]
if ndim not in acceptable_ndims:
raise ValueError('Only 2 and 3 dimensional networks are supported')
# Check if inputs to ndim and lite are compatible
if ndim == 3 and lite:
raise ValueError('lite models are not compatible with 3 dimensional '
'networks')
# Check input to interpolation
acceptable_interpolation = {'bilinear', 'nearest'}
if interpolation not in acceptable_interpolation:
raise ValueError('Interpolation mode "{}" not supported. '
'Choose from {}.'.format(
interpolation, list(acceptable_interpolation)))
# Check input to upsample_type
acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
if upsample_type not in acceptable_upsample:
raise ValueError('Upsample method "{}" not supported. '
'Choose from {}.'.format(
upsample_type, list(acceptable_upsample)))
# Get names of the backbone levels and place in ascending order
backbone_names = get_sorted_keys(backbone_dict)
backbone_features = [backbone_dict[name] for name in backbone_names]
pyramid_names = []
pyramid_finals = []
pyramid_upsamples = []
# Reverse lists
backbone_names.reverse()
backbone_features.reverse()
for i, N in enumerate(backbone_names):
level = int(re.findall(r'\d+', N)[0])
pyramid_names.append('P{}'.format(level))
backbone_input = backbone_features[i]
# Don't add for the bottom of the pyramid
if i == 0:
if len(backbone_features) > 1:
upsamplelike_input = backbone_features[i + 1]
else:
upsamplelike_input = None
addition_input = None
# Don't upsample for the top of the pyramid
elif i == len(backbone_names) - 1:
upsamplelike_input = None
addition_input = pyramid_upsamples[-1]
# Otherwise, add and upsample
else:
upsamplelike_input = backbone_features[i + 1]
addition_input = pyramid_upsamples[-1]
pf, pu = create_pyramid_level(backbone_input,
upsamplelike_input=upsamplelike_input,
addition_input=addition_input,
upsample_type=upsample_type,
level=level,
ndim=ndim,
lite=lite,
interpolation=interpolation,
z_axis_convolutions=z_axis_convolutions)
pyramid_finals.append(pf)
pyramid_upsamples.append(pu)
# Add the final two pyramid layers
if include_final_layers:
# "Second to last pyramid layer is obtained via a
# 3x3 stride-2 conv on the coarsest backbone"
N = backbone_names[0]
F = backbone_features[0]
level = int(re.findall(r'\d+', N)[0]) + 1
P_minus_2_name = 'P{}'.format(level)
if ndim == 2:
P_minus_2 = Conv2D(feature_size, kernel_size=(3, 3),
strides=(2, 2), padding='same',
name=P_minus_2_name)(F)
else:
P_minus_2 = Conv3D(feature_size, kernel_size=(1, 3, 3),
strides=(1, 2, 2), padding='same',
name=P_minus_2_name)(F)
pyramid_names.insert(0, P_minus_2_name)
pyramid_finals.insert(0, P_minus_2)
# "Last pyramid layer is computed by applying ReLU
# followed by a 3x3 stride-2 conv on second to last layer"
level = int(re.findall(r'\d+', N)[0]) + 2
P_minus_1_name = 'P{}'.format(level)
P_minus_1 = Activation('relu', name='{}_relu'.format(N))(P_minus_2)
if ndim == 2:
P_minus_1 = Conv2D(feature_size, kernel_size=(3, 3),
strides=(2, 2), padding='same',
name=P_minus_1_name)(P_minus_1)
else:
P_minus_1 = Conv3D(feature_size, kernel_size=(1, 3, 3),
strides=(1, 2, 2), padding='same',
name=P_minus_1_name)(P_minus_1)
pyramid_names.insert(0, P_minus_1_name)
pyramid_finals.insert(0, P_minus_1)
pyramid_dict = dict(zip(pyramid_names, pyramid_finals))
return pyramid_dict
|
956a04a1ebe14e11061de009894b27e7c2640cb2
| 3,648,265
|
def graphviz(self, filename=None, directory=None, isEdge=False,showLabel=True, **kwargs):
"""Return graphviz source for visualizing the lattice graph."""
return lattice(self, filename, directory, isEdge, showLabel, **kwargs)
|
1c7426efe0f0379822c4c9c0a765a615f26f04a1
| 3,648,266
|
def get_rectangle(origin, end):
"""Return all points of rectangle contained by origin and end."""
size_x = abs(origin[0]-end[0])+1
size_y = abs(origin[1]-end[1])+1
rectangle = []
for x in range(size_x):
for y in range(size_y):
rectangle.append((origin[0]+x, origin[1]+y))
return rectangle
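# Usage sketch: all integer points of the axis-aligned rectangle spanned by the two corners
# (assuming origin coordinates are less than or equal to those of end, as the loop implies).
get_rectangle((0, 0), (1, 2))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]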
|
36badfd8aefaaeda806215b02ed6e92fce6509a3
| 3,648,267
|
def corr_list(df, target, thresh=0.1, sort=True, fill=True):
"""
List Most Correlated Features
Returns a pandas Series with the most correlated features to a certain
target variable. The function will return features with a correlation value
bigger than some threshold, which can be adjusted.
Parameters
----------
df : pandas DataFrame
`df` must contain only numerical values.
target : str or int
String or integer indicating the target variable.
thresh : float, optional
Float indicating the minimum correlation between a feature and the
target above which the feature will be present in the returned list.
Default value is 0.1.
sort : bool, optional
Whether to sort the returned pandas Series. If True, it will be sorted
descending. Default value is True.
fill : bool, optional
Whether to fill null values. If True, null values will be replaced
with 0's. Default value is True.
Returns
-------
pandas Series
"""
if fill:
interest = df.corr().fillna(0)[target]
else:
interest = df.corr()[target]
interest = interest[np.abs(interest) > thresh]
if len(interest) > 0:
if sort:
return interest.sort_values(ascending=False)
else:
return interest
else:
return []
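# Hedged usage sketch for corr_list, assuming numpy (np) and pandas are available as in the module.
import pandas as pd
df_demo = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8], "c": [4, 3, 2, 1]})
corr_list(df_demo, target="a")
# Series with a: 1.0, b: 1.0, c: -1.0 (all pass the 0.1 threshold, sorted descending)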
|
d9562d1bbc7947338cf87ddc6703ef54a21554e0
| 3,648,268
|
def compute_epsilon(steps):
"""Computes epsilon value for given hyperparameters."""
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = FLAGS.batch_size / NUM_TRAIN_EXAMPLES
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to 1e-5 because MNIST has 60000 training points.
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
|
9819cafafeec66cd29b13a916432c839e4365ded
| 3,648,269
|
def get_native_includes(object):
"""
After method association, check which native types an object uses
and return a corresponding string list of include files.
This will also add the include needed for inheritance
"""
includes = set()
for proc in object.procs:
for argname,arg in proc.args.items():
if arg.native:
includes.add(arg.type.type)
if arg.type.matrix and not opts.no_fmat:
includes.add(matrix_classname)
if arg.type.type=='CHARACTER' and arg.intent!='in':
if opts.std_string:
# The use of angle brackets is handled specially
# in the output code
includes.add('<string>')
else:
includes.add(string_classname)
if proc.retval and proc.retval.type.dt and proc.retval.pointer:
includes.add(proc.retval.type.type)
# For inheritance:
if object.extends:
includes.add(object.extends)
return includes
|
0c09d39bd61b5a711bd718dcb38fab7e4e1e01bf
| 3,648,270
|
import torch
def dice_coeff(input, target):
"""Dice coeff for batches"""
if input.is_cuda:
s = torch.FloatTensor(1).to(device_f).zero_()
else:
s = torch.FloatTensor(1).zero_()
for i, c in enumerate(zip(input, target)):
s = s + DiceCoeff().forward(c[0], c[1])
return s / (i + 1)
|
da390729d2e1d8e2ae53814f8ac398a6c7e5380a
| 3,648,272
|
def group_error_rates(labels, predictions, groups):
"""Returns a list containing error rates for each protected group."""
errors = []
for jj in range(groups.shape[1]):
if groups[:, jj].sum() == 0: # Group is empty?
errors.append(0.0)
else:
signed_labels_jj = 2 * labels[groups[:, jj] == 1] - 1
predictions_jj = predictions[groups[:, jj] == 1]
errors.append(np.mean(signed_labels_jj * predictions_jj <= 0))
return errors
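# Usage sketch for group_error_rates, assuming numpy is imported as np in the module.
import numpy as np
labels_demo = np.array([1, 0, 1, 0])
predictions_demo = np.array([0.5, -0.2, -0.1, 0.3])   # sign encodes the predicted class
groups_demo = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
group_error_rates(labels_demo, predictions_demo, groups_demo)  # [0.0, 1.0]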
|
0b390dfde16910332f10afacaa2f9031c04d846a
| 3,648,273
|
def get_emails_by_user_names(user_names):
"""Get emails by user names."""
emails_service = emails_digest_service.DailyEmailsService()
emails_service.open_emails_digest()
user_emails_dict = dict.fromkeys(user_names)
for user_name in user_names:
user_emails_dict[user_name] = emails_service.get_email_by_user_name(
user_name)
return user_emails_dict
|
331d5799bac79c08240770260306ba84bf2f568b
| 3,648,274
|
def inbound_and_outbound_node_sets(C, CT):
"""
Returns the set of nodes that can reach an event and can be reached by an event,
and the difference between those sets (outbound / inbound).
"""
inbound = defaultdict(set)
for node, event in zip(*np.nonzero(C)):
inbound[event].add(node)
outbound = defaultdict(set)
for node, event in zip(*np.nonzero(CT)):
outbound[event].add(node)
difference = {}
for event, in_nodes in inbound.items():
difference[event] = outbound[event] - in_nodes
return inbound, outbound, difference
|
517746700a7a978a49a597237db362eee98d91b6
| 3,648,275
|
def policy(Q):
"""Hard max over prescriptions
Params:
-------
* Q: dictionary of dictionaries
Nested dictionary representing a table
Returns:
-------
* policy: dictionary of states to policies
"""
pol = {}
for s in Q:
pol[s] = max(Q[s].items(), key=lambda x: x[1])[0]
return pol
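# Usage sketch: pick the highest-valued action per state from a nested Q-table.
Q_demo = {
    's0': {'left': 0.1, 'right': 0.7},
    's1': {'left': 0.4, 'right': 0.2},
}
policy(Q_demo)  # {'s0': 'right', 's1': 'left'}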
|
e69f66fba94b025034e03428a5e93ba1b95918e8
| 3,648,276
|
def fft(series):
"""
FFT of a series
Parameters
----------
series
Returns
-------
"""
signal = series.values
time = series.index
dt = np.mean(np.diff(time))
#n = 11*len(time)
n = 50000
frequencies = np.fft.rfftfreq(n=n, d=dt) # [Hz]
dft = np.abs(np.fft.rfft(signal, n=n))
return frequencies, dft
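# Hedged usage sketch for fft, assuming numpy (np) and pandas are imported in the module;
# the series index carries a uniformly spaced time axis in seconds.
import numpy as np
import pandas as pd
t_demo = np.arange(0, 1, 0.001)
series_demo = pd.Series(np.sin(2 * np.pi * 5 * t_demo), index=t_demo)
freqs_demo, dft_demo = fft(series_demo)
freqs_demo[np.argmax(dft_demo)]  # close to 5.0 Hz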
|
a6d1f7cfa45d504a86b434702a49eafa08737006
| 3,648,277
|
def local_variance(V, tsize=5):
""" local non-linear variance calculation
Parameters
----------
V : numpy.array, size=(m,n), dtype=float
array with one velocity component; all algorithms are independent of their
axis.
Returns
-------
sig_V : numpy.array, size=(m,n), dtype=float
statistical local variance, based on the procedure described in [1],[2]
References
----------
.. [1] Joughin "Ice-sheet velocity mapping: a combined interferometric and
speckle-tracking approach", Annals of Glaciology vol.34 pp.195-201.
.. [2] Joughin et al. "Greenland ice mapping project 2 (GIMP-2) algorithm
theoretical basis document", Making earth system data records for use in
research environment (MEaSUREs) documentation.
"""
V_class = local_mad_filter(V, tsize=tsize)
V[V_class] = np.nan
V_0 = local_infilling_filter(V, tsize=tsize)
# running mean adjustment
mean_kernel = np.ones((tsize, tsize), dtype=float)/(tsize**2)
V = ndimage.convolve(V, mean_kernel)
# plane fitting and variance of residual
sig_V = local_nonlin_var_filter(V, tsize=tsize)
return sig_V
|
e7e10f8c73f01b20a27ad06813defdd406bf977a
| 3,648,278
|
def get_virtual_device_configuration(device):
"""Get the virtual device configuration for a PhysicalDevice.
Returns the list of VirtualDeviceConfiguration objects previously configured
by a call to `tf.config.experimental.set_virtual_device_configuration()`.
For example:
>>> physical_devices = tf.config.experimental.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> configs = tf.config.experimental.get_virtual_device_configuration(
... physical_devices[0])
>>> try:
... assert configs is None
... tf.config.experimental.set_virtual_device_configuration(
... physical_devices[0],
... [tf.config.experimental.VirtualDeviceConfiguration(),
... tf.config.experimental.VirtualDeviceConfiguration()])
... configs = tf.config.experimental.get_virtual_device_configuration(
... physical_devices[0])
... assert len(configs) == 2
... except:
... # Cannot modify virtual devices once initialized.
... pass
Args:
device: PhysicalDevice to query
Returns:
List of `tf.config.experimental.VirtualDeviceConfiguration` objects or
`None` if no virtual device configuration has been set for this physical
device.
"""
return context.context().get_virtual_device_configuration(device)
|
49a99c17c2859bb40a7bfbbac840bf82310428e1
| 3,648,279
|
def user_directory_path(instance, filename):
"""Sets path to user uploads to: MEDIA_ROOT/user_<id>/<filename>"""
return f"user_{instance.user.id}/{filename}"
|
84be5fe74fa5059c023d746b2a0ff6e32c14c10d
| 3,648,280
|
def setup(app):
"""Setup the Sphinx extension."""
# Register builder.
app.add_builder(BeamerBuilder)
# Add setting for allowframebreaks.
app.add_config_value("beamer_allowframebreaks", True, "beamer")
# Add setting for Beamer theme.
app.add_config_value("beamer_theme", "Warsaw", "beamer")
# Adjust titles upon doctree-resolved.
app.connect("doctree-resolved", adjust_titles)
return {
"version": "1.0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
cc5f48eeff65876a2d052dad285a77bc76e115c0
| 3,648,281
|
def extract_text(arg: Message_T) -> str:
"""
Extract the plain-text part of a message (plain-text segments are joined with spaces).
Parameters:
arg (nonebot.typing.Message_T):
"""
arg_as_msg = Message(arg)
return arg_as_msg.extract_plain_text()
|
06d19c9ca4e907edf433f910600161d142ca914e
| 3,648,282
|
def dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r - 1)
j_k = min(j + k, c - 1)
min_list += [D0[i_k, j], D0[i, j_k]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1], C, D1, path
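# Hedged usage sketch for dtw. It assumes the module provides `zeros` and `inf`
# (e.g. from numpy) and the `_traceback` helper used for path recovery, as the body implies.
dist_demo = lambda a, b: abs(a - b)
d_demo, cost_demo, acc_demo, path_demo = dtw([1.0, 2.0, 3.0], [1.0, 2.0, 2.0, 3.0], dist_demo)
# d_demo == 0.0 here, since every element of x can be matched exactly in y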
|
a30a492d816e5234590d9fadfbba722db0ae9f72
| 3,648,283
|
def format_line_count_break(padding: int) -> str:
"""Return the line count break."""
return format_text(
" " * max(0, padding - len("...")) + "...\n", STYLE["detector_line_start"]
)
|
2fe4d4b8195468787f31b3407d32a4e039f7bb6c
| 3,648,284
|
from typing import Tuple
from typing import get_args
def identify_generic_specialization_types(
cls: type, generic_class: type
) -> Tuple[type, ...]:
"""
Identify the types of the specialization of generic class the class cls derives from.
:param cls: class which derives from a specialization of generic class.
:param generic_class: a generic class.
:return: specialization types.
"""
return get_args(find_generic_specialization_parent_class(cls, generic_class))
|
3932062a5a4543b280ebc8601126e10d11136717
| 3,648,285
|
def Metadata():
"""Get a singleton that fetches GCE metadata.
Returns:
_GCEMetadata, An object used to collect information from the GCE metadata
server.
"""
def _CreateMetadata(unused_none):
global _metadata
if not _metadata:
_metadata = _GCEMetadata()
_metadata_lock.lock(function=_CreateMetadata, argument=None)
_metadata_lock.unlock()
return _metadata
|
096ac4f0278e0048944d5a10c4153be7c60aae88
| 3,648,286
|
def pa11y_counts(results):
"""
Given a list of pa11y results, return three integers:
number of errors, number of warnings, and number of notices.
"""
num_error = 0
num_warning = 0
num_notice = 0
for result in results:
if result['type'] == 'error':
num_error += 1
elif result['type'] == 'warning':
num_warning += 1
elif result['type'] == 'notice':
num_notice += 1
return num_error, num_warning, num_notice
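# Usage sketch: count result types from a pa11y-style list of dicts.
results_demo = [
    {'type': 'error'}, {'type': 'warning'}, {'type': 'notice'}, {'type': 'error'},
]
pa11y_counts(results_demo)  # (2, 1, 1)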
|
346c1efe0cae5934e623a8643b0f23f85300181d
| 3,648,287
|
def parse_properties(df, columns_to_integer=None, columns_to_datetime=None, columns_to_numeric=None, columns_to_boolean=None, columns_to_string = None, dt_unit = 'ms', boolean_dict = {'true': True, 'false': False, '': None}):
"""
Parse string columns to other formats. This function is used in the HubSpot routine; it is not yet generalized to other routines.
df: pd.DataFrame
columns_to_: list with names of the columns to parse
return: pd.DataFrame with parsed columns
"""
if columns_to_integer:
df[columns_to_integer] = df[columns_to_integer].apply(string_to_integer)
if columns_to_datetime:
df[columns_to_datetime] = df[columns_to_datetime].apply(pd.to_datetime, unit = dt_unit)
if columns_to_numeric:
df[columns_to_numeric] = df[columns_to_numeric].apply(pd.to_numeric, errors = 'coerce', downcast='float')
if columns_to_boolean:
df[columns_to_boolean] = df[columns_to_boolean].replace(boolean_dict).astype('boolean')
if columns_to_string:
df[columns_to_string] = df[columns_to_string].apply(int_to_string)
return df
|
a162397308d98faac6ab24f07aceee439aa32095
| 3,648,288
|
import requests
def http_request(method, url, headers, data=None):
"""
Request util
:param method: GET or POST or PUT
:param url: url
:param headers: headers
:param data: optional data (needed for POST)
:return: response text
"""
response = requests.request(method, url, headers=headers, data=data)
if response.status_code not in [200, 201, 204]:
http_error_msg = u'%s HTTP request failed: %s for url: %s' % (response.status_code, response.text, url)
#print ("utils.http_request ", http_error_msg)
raise requests.exceptions.HTTPError(response.text)
return response.text
|
6d0453be79b3ae0f7ed60b5a8759b9295365dd6c
| 3,648,289
|
def parse_title(line):
"""if this is title, return Tuple[level, content],
@type line: str
@return: Optional[Tuple[level, content]]
"""
line = line.strip()
if not line.startswith('#'):
return None
sharp_count = 0
for c in line:
if c == '#':
sharp_count += 1
else:
break
if sharp_count == len(line):
return None
title = line[sharp_count:].strip()
return sharp_count, title
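# Usage sketch: markdown-style headings yield (level, text); other lines yield None.
parse_title('## Getting started')  # (2, 'Getting started')
parse_title('plain paragraph')     # None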
|
7c170f417755c878d225b780b8475a379501c19f
| 3,648,291
|
import typing
def issubtype(cls: type, clsinfo: type) -> bool:
"""
Return whether ``cls`` is a subclass of ``clsinfo`` while also considering
generics.
:param cls: the subject.
:param clsinfo: the object.
:return: True if ``cls`` is a subclass of ``clsinfo`` considering generics.
"""
info_generic_type, info_args = _split_generic(clsinfo)
if clsinfo in (typing.Any, object):
result = True
elif info_args:
result = _issubtype_generic(cls, info_generic_type, info_args)
else:
result = issubclass(_without_generic(cls), _without_generic(clsinfo))
return result
|
942d5760c3de4d63bcd9c3f5934fcc89727dc958
| 3,648,292
|
def delete_status(id):
"""Delete an existing status
The status to be deleted should be posted as JSON using
'application/json as the content type. The posted JSON needs to
have 2 required fields:
* user (the username)
* api_key
An example of the JSON::
{
"user": "r1cky",
"api_key": "qwertyuiopasdfghjklzxcvbnm1234567890"
}
"""
db = get_session(current_app)
# The data we need
user = request.json.get('user')
if not (id and user):
return jsonify(dict(error='Missing required fields.')), 400
status = db.query(Status).filter_by(id=id)
if not status.count():
return jsonify(dict(error='Status does not exist.')), 400
if not status[0].user.username == user:
return jsonify(dict(error='You cannot delete this status.')), 403
status.delete()
db.commit()
return jsonify(dict(id=id))
|
d6a9ebbc787283f3ac247935f3fe5ad9080d2bd0
| 3,648,293
|
def process_data(data):
""" Change labels, group by planner and format for latex."""
data = data.replace(
{
"grid_run_1": "Grid",
"prm_run_1": "PRM A",
"prm_run_2": "PRM B",
"prm_run_3": "PRM C",
}
)
data = data.rename(
columns={"num_samples": "samples", "cc_checks": "collision checks"}
)
df = data.groupby(["run"]).sum()[["samples", "jvm", "time", "collision checks"]]
df["samples"] = np.round(df["samples"])
df["time"] = np.round(df["time"])
df["samples"] = np.round(df["collision checks"])
sr = data.groupby(["run"]).sum()[["success"]]
df["solved"] = sr.astype(int).astype(str) + "/14"
latex = df.to_latex(
formatters={
"samples": "{:,.0f}".format,
"jvm": "{:.2f}".format,
"collision checks": "{:,.0f}".format,
"time": "{:.0f}".format,
}
)
return df, latex
|
24ac1c2ee872c5051eccc9774943f922671267b1
| 3,648,294
|
def detect_outlier(TS, samples_wind=60, order=3):
"""Find outliers in TS by interpolate one sample at a time, measure diff.
between rec. sample and interpolated, and getting the peaks in the int diff
across recording.
Parameters
-------------
TS : array (x, y) x n_samples
Times series to extract features
samples_wind : int
Window length of segment where a sample is interpolated.
order : int
B-sline interpolation order
Returns
--------
outliers: list of array n_chans [n_outliers]
Indices of outliers per channel
outliers_int: list of array n_chans [n_outliers]
New interpolated values of the outliers
zdiffs: array, n_chans x n_samples
Z-scored interpolation differences per channel
"""
s_win_half = int(samples_wind/2)
outliers = []
outliers_int = []
zdiffs = []
for ts in TS:
n_samples, = ts.shape
diff = [np.nan]
ts_int_one = [np.nan]
for w in range(1,n_samples-1):
wix = [w-s_win_half,w+s_win_half]
# Bound beg or end if outside
wix[0] = 0 if wix[0]<0 else wix[0]
wix[1] = n_samples if wix[1]>n_samples else wix[1]
seg1, seg2 = ts[wix[0]:w], ts[w+1:wix[1]]
seg = np.concatenate((seg1,seg2))
# make indexes ts with and without sample
ixs = np.arange(seg.shape[0]+1)
ixs_out = np.delete(ixs, np.argwhere(ixs == seg1.shape[0]))
# Interpolate and measure diff
fcubic = interpolate.interp1d(ixs_out, seg, kind=order)
ts_int_out = fcubic(ixs)
smpl_int = ts_int_out[seg1.shape[0]]
diff.append(np.abs(smpl_int-ts[w]))
ts_int_one.append(smpl_int)
diff_z = zscore(diff)
pks_p, _ = feat_ext.find_maxmin_peaks(diff_z[1:], height=5)
pks_p = pks_p + 1 # add 1 sampl ( first is nan)
int_smp = np.array(ts_int_one)[pks_p]
outliers.append(pks_p)
outliers_int.append(int_smp)
zdiffs.append(diff_z)
return outliers, outliers_int, np.array(zdiffs)
|
91515770554155ddb0da507e94e4cffc611202d9
| 3,648,295
|
def postprocess(backpointers, best_tag_id):
"""Do postprocess."""
best_tag_id = best_tag_id.asnumpy()
batch_size = len(best_tag_id)
best_path = []
for i in range(batch_size):
best_path.append([])
best_local_id = best_tag_id[i]
best_path[-1].append(best_local_id)
for bptrs_t in reversed(backpointers):
bptrs_t = bptrs_t[0].asnumpy()
local_idx = bptrs_t[i]
best_local_id = local_idx[best_local_id]
best_path[-1].append(best_local_id)
# Pop off the start tag (we don't want to return that to the caller)
best_path[-1].pop()
best_path[-1].reverse()
return best_path
|
5be856610a3c81453c11c584507dcb4ad0e4cf61
| 3,648,296
|
def carnatic_string_to_ql_array(string_):
"""
:param str string_: A string of carnatic durations separated by spaces.
:return: The input string converted to a quarter length array.
:rtype: numpy.array.
>>> carnatic_string_to_ql_array('oc o | | Sc S o o o')
array([0.375, 0.25 , 0.5 , 0.5 , 1.5 , 1. , 0.25 , 0.25 , 0.25 ])
"""
split_string = string_.split()
vals = []
for token in split_string:
try:
if carnatic_symbols[token] is not None:
vals.append(carnatic_symbols[token]["value"])
except KeyError:
pass
return np.array(vals)
|
19386ac13233c3f5cc70eea7f75d287784f6a969
| 3,648,297
|
def login_redirect(request: HttpRequest) -> HttpResponse:
"""
Redirects the user to the Strava authorization page
:param request: HttpRequest
:return: HttpResponse
"""
strava_uri = get_strava_uri()
return redirect(strava_uri)
|
80eb714ab8f1fde25f2a3ce57bdc540a5a7a980d
| 3,648,299
|
def f(p, x):
"""
Parameters
----------
p : list
A list with a length of at least 2.
x : int or float
Scaling factor for the first variable in p.
Returns
-------
int or float
Returns the first value in p scaled by x, added to the second value in p.
Examples
--------
>>> import numpy as np
>>> from .pycgmKinetics import f
>>> p = [1, 2]
>>> x = 10
>>> f(p, x)
12
>>> p = np.array([5.16312215, 8.79307163])
>>> x = 2.0
>>> np.around(f(p, x),8)
19.11931593
"""
return (p[0] * x) + p[1]
|
3a5e464e7599b6233086e3dddb623d88c6e5ccb6
| 3,648,300
|
def get_contributors_users(users_info) -> list:
"""
Get the github users from the inner PRs.
Args:
users_info (list): the response of get_inner_pr_request()
Returns (list): Github users
"""
users = []
for item in users_info:
user = item.get('login')
github_profile = item.get('html_url')
pr_body = item.get('body')
if not user == 'xsoar-bot':
users.append({
'Contributor': f"<img src='{item.get('avatar_url')}'/><br></br> "
f"<a href='{github_profile}' target='_blank'>{user}</a>"
})
if user == 'xsoar-bot':
if 'Contributor' in pr_body:
contributor = USER_NAME_REGEX.search(pr_body)[0].replace('\n', '')
user_info = get_github_user(contributor)
github_avatar = user_info.get('avatar_url')
github_profile = user_info.get('html_url')
if not github_avatar and not github_profile:
print(f'The user "{contributor}" was not found.')
continue
users.append({
'Contributor': f"<img src='{github_avatar}'/><br></br> "
f"<a href='{github_profile}' target='_blank'>{contributor}</a>"
})
for user in users:
prs = users.count(user)
user.update({'Number of Contribution(s)': prs})
list_users = []
result = {i['Contributor']: i for i in reversed(users)}.values()
new_res = sorted(result, key=lambda k: k['Number of Contribution(s)'], reverse=True)
for user in new_res:
user['Contributor'] += f'<br></br>{user["Number of Contribution(s)"]} Contributions'
list_users.append(user['Contributor'])
return list_users
|
1a7bdb6608600c2959ec3961dfd9567cf674f471
| 3,648,301
|
def euler2mat(roll, pitch, yaw):
"""
Create a rotation matrix for the orientation expressed by this transform.
Copied directly from FRotationTranslationMatrix::FRotationTranslationMatrix
in Engine/Source/Runtime/Core/Public/Math/RotationTranslationMatrix.h ln 32
:return:
"""
angles = _TORAD * np.array((roll, pitch, yaw))
sr, sp, sy = np.sin(angles)
cr, cp, cy = np.cos(angles)
return np.array([
[cp * cy, sr * sp * cy - cr * sy, -(cr * sp * cy + sr * sy)],
[cp * sy, sr * sp * sy + cr * cy, cy * sr - cr * sp * sy],
[sp, -sr * cp, cr * cp]
])
|
2b635f50bb42f7a79938e38498e6e6fefd993d0f
| 3,648,302
|
def remove_articles(string: str, p: float = 1.0) -> str:
"""Remove articles from text data.
Matches and removes the following articles:
* the
* a
* an
* these
* those
* his
* hers
* their
with probability p.
Args:
string: text
p: probability of removing a given article
Returns:
enriched text
"""
mapping = {article: "" for article in ARTICLES}
return _sub_words(string, probability=p, mapping=mapping)
|
af2b9f61dc36159cb027eae03dbf1b645e48be62
| 3,648,303
|
import collections
import re
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r"\s\s+", line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len
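# Hypothetical usage sketch (the sample text below is invented for
# illustration): each definition line is an argument, two or more spaces,
# then its description.
# >>> _get_definitions("-h --help      Show this help message.\n"
# ...                  "-o FILE        Write output to FILE.\n")
# (OrderedDict([('-h --help', 'Show this help message.'),
#               ('-o FILE', 'Write output to FILE.')]), 9)
# The 9 is the length of the longest argument, '-h --help'.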
|
a97fe58c3eb115bff041e77c26868bae3bc54c88
| 3,648,305
|
import requests
def pairs_of_response(request):
"""pairwise testing for content-type, headers in responses for all urls """
response = requests.get(request.param[0], headers=request.param[1])
print(request.param[0])
print(request.param[1])
return response
|
f3a67b1cbf41e2c2e2aa5edb441a449fdff0d8ae
| 3,648,306
|
def setup():
""" The setup wizard screen """
if DRIVER is True:
flash(Markup('Driver not loaded'), 'danger')
return render_template("setup.html")
|
1c13ba635fcdd3dd193e511002d2d289786980a3
| 3,648,307
|
def ldns_pkt_set_edns_extended_rcode(*args):
"""LDNS buffer."""
return _ldns.ldns_pkt_set_edns_extended_rcode(*args)
|
3fed71706554170d07281a59ff524de52487244d
| 3,648,308
|
def sim_spiketrain_poisson(rate, n_samples, fs, bias=0):
"""Simulate spike train from a Poisson distribution.
Parameters
----------
rate : float
The firing rate of neuron to simulate.
n_samples : int
The number of samples to simulate.
fs : int
        The sampling rate.
    bias : float, optional
        Offset added to the firing rate when computing the spike probability. Default is 0.
Returns
-------
spikes : 1d array
Simulated spike train.
Examples
--------
Simulate a spike train from a Poisson distribution.
>>> spikes = sim_spiketrain_poisson(0.4, 10, 1000, bias=0)
"""
spikes = np.zeros(n_samples)
# Create uniform sampling distribution
unif = np.random.uniform(0, 1, size=n_samples)
# Create spikes
mask = unif <= ((rate + bias) * 1/fs)
spikes[mask] = 1
return spikes
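# Quick sanity check (assumes numpy imported as np, as used above): with
# rate=20 Hz, fs=1000 Hz and 10 seconds of samples, the empirical firing rate
# should come out close to 20 spikes per second on average.
# >>> spikes = sim_spiketrain_poisson(20, 10000, 1000)
# >>> spikes.sum() / 10.0   # approximate rate in Hz, close to 20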
|
853a16ae50b444fad47dcbea5f7de4edf58f34b5
| 3,648,309
|
def getHausdorff(labels, predictions):
"""Compute the Hausdorff distance."""
# Hausdorff distance is only defined when something is detected
resultStatistics = sitk.StatisticsImageFilter()
resultStatistics.Execute(predictions)
if resultStatistics.GetSum() == 0:
return float('nan')
# Edge detection is done by ORIGINAL - ERODED, keeping the outer boundaries of lesions. Erosion is performed in 2D
eTestImage = sitk.BinaryErode(labels, (1, 1, 0))
eResultImage = sitk.BinaryErode(predictions, (1, 1, 0))
hTestImage = sitk.Subtract(labels, eTestImage)
hResultImage = sitk.Subtract(predictions, eResultImage)
hTestArray = sitk.GetArrayFromImage(hTestImage)
hResultArray = sitk.GetArrayFromImage(hResultImage)
# Convert voxel location to world coordinates. Use the coordinate system of the test image
# np.nonzero = elements of the boundary in numpy order (zyx)
# np.flipud = elements in xyz order
# np.transpose = create tuples (x,y,z)
# labels.TransformIndexToPhysicalPoint converts (xyz) to world coordinates (in mm)
testCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
np.transpose(np.flipud(np.nonzero(hTestArray))).astype(int))
resultCoordinates = np.apply_along_axis(labels.TransformIndexToPhysicalPoint, 1,
np.transpose(np.flipud(np.nonzero(hResultArray))).astype(int))
# Compute distances from test to result; and result to test
dTestToResult = getDistancesFromAtoB(testCoordinates, resultCoordinates)
dResultToTest = getDistancesFromAtoB(resultCoordinates, testCoordinates)
return max(np.percentile(dTestToResult, 95), np.percentile(dResultToTest, 95))
|
933206c551f2abd6608bf4cdbb847328b8fee113
| 3,648,310
|
import re
def _filesizeformat(file_str):
"""
Remove the unicode characters from the output of the filesizeformat()
function.
:param file_str:
:returns: A string representation of a filesizeformat() string
"""
cmpts = re.match(r'(\d+\.?\d*)\S(\w+)', filesizeformat(file_str))
return '{} {}'.format(cmpts.group(1), cmpts.group(2))
|
c9811120a257fda8d3fe6c3ee1cd143f17fc4f6e
| 3,648,311
|
import math
def radec_to_lb(ra, dec, frac=False):
"""
Convert from ra, dec to galactic coordinates.
Formulas from 'An Introduction to Modern Astrophysics (2nd Edition)' by
Bradley W. Carroll, Dale A. Ostlie (Eq. 24.16 onwards).
NOTE: This function is not as accurate as the astropy conversion, nor as
the Javascript calculators found online. However, as using astropy was
prohibitively slow while running over large populations, we use this
function. While this function is not as accurate, the under/over
estimations of the coordinates are equally distributed meaning the errors
cancel each other in the limit of large populations.
Args:
ra (string): Right ascension given in the form '19:06:53'
dec (string): Declination given in the form '-40:37:14'
frac (bool): Denote whether coordinates are already fractional or not
Returns:
gl, gb (float): Galactic longitude and latitude [fractional degrees]
"""
if not frac:
ra, dec = frac_deg(ra, dec)
a = math.radians(ra)
d = math.radians(dec)
# Coordinates of the galactic north pole (J2000)
a_ngp = math.radians(12.9406333 * 15.)
d_ngp = math.radians(27.1282500)
l_ngp = math.radians(123.9320000)
sd_ngp = math.sin(d_ngp)
cd_ngp = math.cos(d_ngp)
sd = math.sin(d)
cd = math.cos(d)
# Calculate galactic longitude
y = cd*math.sin(a - a_ngp)
x = cd_ngp*sd - sd_ngp*cd*math.cos(a - a_ngp)
gl = - math.atan2(y, x) + l_ngp
gl = math.degrees(gl) % 360
# Shift so in range -180 to 180
if gl > 180:
gl = -(360 - gl)
# Calculate galactic latitude
gb = math.asin(sd_ngp*sd + cd_ngp*cd*math.cos(a - a_ngp))
gb = math.degrees(gb) % 360.
if gb > 270:
gb = -(360 - gb)
return gl, gb
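# Usage sketch: coordinates can also be passed as fractional degrees with
# frac=True. The Galactic centre (ra ~ 266.405 deg, dec ~ -28.936 deg) should
# map to roughly (gl, gb) ~ (0, 0), within the accuracy limits noted above.
# >>> gl, gb = radec_to_lb(266.405, -28.936, frac=True)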
|
85156dae81a636f34295bcb8aab6f63243d9c2b3
| 3,648,312
|
from typing import Counter
from datetime import datetime
def status_codes_by_date_stats():
"""
Get stats for status codes by date.
Returns:
list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks.
"""
def date_counter(queryset):
return dict(Counter(map(
lambda dt: ms_since_epoch(datetime.combine(
make_naive(dt), datetime.min.time())),
list(queryset.values_list('datetime', flat=True)))))
codes = {low: date_counter(
RequestLog.objects.filter(status_code__gte=low, status_code__lt=high))
for low, high in ((200, 300), (300, 400), (400, 500))}
codes[500] = date_counter(RequestLog.objects.filter(status_code__gte=500))
codes['attacks'] = date_counter(RequestLog.objects.filter(
status_code__in=(400, 444, 502)))
stats = {}
for code in (200, 300, 400, 500, 'attacks'):
for date, count in codes[code].items():
if stats.get(date, None) is None:
stats[date] = {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0}
stats[date][code] += count
stats = sorted([(k, v) for k, v in stats.items()], key=lambda x: x[0])
return stats
|
e56491e32a774f2b3399eb252bc5c7660539d573
| 3,648,313
|
def r8_y1x ( t ):
"""
#*****************************************************************************80
#
#% R8_Y1X evaluates the exact solution of the ODE.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 30 August 2010
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, real T, the value of the independent variable.
#
# Output, real Y1X, the exact solution.
#
"""
y1x = 20.0 / ( 1.0 + 19.0 * exp ( - 0.25 * t ) )
return(y1x)
|
b6146173c09aede82fab599a9e445d3d062bf71a
| 3,648,314
|
def get_target_model_ops(model, model_tr):
"""Get operations related to the target model.
Args:
* model: original model
* model_tr: target model
Returns:
* init_op: initialization operation for the target model
* updt_op: update operation for the target model
"""
init_ops, updt_ops = [], []
for var, var_tr in zip(model.vars, model_tr.vars):
init_ops.append(tf.assign(var_tr, var))
if var not in model.trainable_vars:
updt_ops.append(tf.assign(var_tr, var)) # direct update for non-trainable variables
else:
updt_ops.append(tf.assign(var_tr, (1. - FLAGS.ddpg_tau) * var_tr + FLAGS.ddpg_tau * var))
return tf.group(*init_ops), tf.group(*updt_ops)
|
0967e754e2731140ca5d0d85c9c1b6cff7b2cbd2
| 3,648,315
|
import itertools
import functools
def next_count(start: int = 0, step: int = 1):
"""Return a callable returning descending ints.
>>> nxt = next_count(1)
>>> nxt()
1
>>> nxt()
2
"""
count = itertools.count(start, step)
return functools.partial(next, count)
|
299d457b2b449607ab02877eb108c076cb6c3e16
| 3,648,316
|
def show_locale(key_id: int):
"""Get a locale by ID"""
return locales[key_id]
|
6bce0cc45e145a6bdfb5e84cdde8c6d386525094
| 3,648,317
|
def refresh():
"""Pull fresh data from Open AQ and replace existing data."""
DB.drop_all()
DB.create_all()
update_db()
DB.session.commit()
records = Record.query.all()
return render_template('aq_base.html', title='Refreshed!', records=records)
|
a1f138f92f8e7744d8fc5b659bb4d99fc32341e9
| 3,648,318
|
def get_similar_taxa():
"""
Get a list of all pairwise permutations of taxa sorted according to similarity
Useful for detecting duplicate and near-duplicate taxonomic entries
:return: list of 2-tuples ordered most similar to least
"""
taxa = Taxon.objects.all()
taxon_name_set = set([t.name for t in taxa])
plist = [pair for pair in permutations(taxon_name_set, 2)]
return sorted(plist, key=similar, reverse=True)
|
cb94efad4103edc8db0fe90f0e2bad7b52bf29f5
| 3,648,319
|
import json
def make_img_id(label, name):
""" Creates the image ID for an image.
Args:
label: The image label.
name: The name of the image within the label.
Returns:
The image ID. """
return json.dumps([label, name])
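# The ID is simply the JSON encoding of the [label, name] pair, so it can be
# decoded back with json.loads:
# >>> make_img_id("cat", "cat_001.jpg")
# '["cat", "cat_001.jpg"]'
# >>> json.loads(make_img_id("cat", "cat_001.jpg"))
# ['cat', 'cat_001.jpg']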
|
4ddcbf9f29d8e50b0271c6ee6260036b8654b90f
| 3,648,320
|
def col2rgb(color):
""" Convert any colour known by matplotlib to RGB [0-255] """
return rgb012rgb(*col2rgb01(color))
|
075dbf101d032bf1fb64a8a4fd86407ec0b91b2d
| 3,648,321
|
from typing import List
import random
def quick_select_median(values: List[tuple], pivot_fn=random.choice, index=0) -> tuple:
"""
    Find the median tuple using the quickselect algorithm.
:param values: List[tuple]
:param pivot_fn:
:param index: int
:return: tuple
"""
k = len(values) // 2
return quick_select(values, k, pivot_fn, index=index)
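# Hedged usage sketch: assumes the companion quick_select() (not shown here)
# returns the k-th smallest tuple, comparing tuples by the value at `index`.
# >>> points = [(3.0, 'c'), (1.0, 'a'), (2.0, 'b')]
# >>> quick_select_median(points)   # expected: the middle tuple, (2.0, 'b')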
|
a2977424a9fc776b2448bed4c17eea754003242c
| 3,648,322
|
import time
import hashlib
def get_admin_token(key, previous=False):
"""Returns a token with administrative priviledges
Administrative tokens provide a signature that can be used to authorize
edits and to trigger specific administrative events.
Args:
key (str): The key for generating admin tokens
previous (bool, optional): Retrieve the most recently issued token for this key
Returns:
Token
"""
if key is None:
raise ValueError('Value for "key" was expected')
expires = get_admin_lifetime()
secret = __get_admin_salt()
argset = [secret, key]
ts = int(time.time())
if previous:
ts = ts - expires
    argset.append(str(int(ts / expires)))  # one entry per expiry window (extend() would add each character separately)
    str_argset = [str(a) for a in argset]
msg = ':'.join(str_argset)
tok = Token(hashlib.sha256(msg.encode('utf-8')).hexdigest()[
0:settings.TOKEN_LENGTH])
return tok
|
6f92378676905b8d035bd201abe30d1d951a7fc0
| 3,648,323
|
def vulnerability_weibull(x, alpha, beta):
"""Return vulnerability in Weibull CDF
Args:
x: 3sec gust wind speed at 10m height
alpha: parameter value used in defining vulnerability curve
beta: ditto
Returns: weibull_min.cdf(x, shape, loc=0, scale)
Note:
weibull_min.pdf = c/s * (x/s)**(c-1) * exp(-(x/s)**c)
c: shape, s: scale, loc=0
weibull_min.cdf = 1 - exp(-(x/s)**c)
while Australian wind vulnerability is defined as
DI = 1 - exp(-(x/exp(beta))**(1/alpha))
therefore:
s = exp(beta)
c = 1/alpha
"""
# convert alpha and beta to shape and scale respectively
shape = 1 / alpha
scale = np.exp(beta)
return weibull_min.cdf(x, shape, loc=0, scale=scale)
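# Example with illustrative (not calibrated) parameters: alpha=0.1 and
# beta=4.0 give shape 10 and scale exp(4) ~ 54.6, so a 60 m/s gust yields a
# damage index of roughly 0.92.
# >>> vulnerability_weibull(60.0, alpha=0.1, beta=4.0)   # ~ 0.92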
|
4bb36643b483309e4a4256eb74bc3bbd7b447416
| 3,648,325
|
def _find_best_deals(analysis_json) -> tuple:
"""Finds the best deal out of the analysis"""
best_deals = []
for deal in analysis_json:
if _get_deal_value(analysis_json, deal) > MINIMUM_ConC_PERCENT:
best_deals.append(deal)
best_deals.sort(key=lambda x: _get_deal_value(analysis_json, x),
reverse=True
)
best_deal = best_deals[0]
return best_deal, best_deals
|
5415f7104ec01a56249df9a142ff3c31b2964c42
| 3,648,326
|
def deserialize(s_transform):
"""
    Convert a serialized transform dict back into an UnrealTransform.
    :param s_transform: dict with optional 'location' and 'rotation' keys, or None
    :return: UnrealTransform
"""
if s_transform is None:
return UnrealTransform()
return UnrealTransform(
location=s_transform['location'] if 'location' in s_transform else (0, 0, 0),
rotation=s_transform['rotation'] if 'rotation' in s_transform else (0, 0, 0)
)
|
13daf861e84545d2f50b10617ece6d23976eacf0
| 3,648,327
|
def spectrum(x, times=None, null_hypothesis=None, counts=1, frequencies='auto', transform='dct',
returnfrequencies=True):
"""
Generates a power spectrum from the input time-series data. Before converting to a power
spectrum, x is rescaled as
x - > (x - counts * null_hypothesis) / sqrt(counts * null_hypothesis * (1-null_hypothesis)),
where the arithmetic is element-wise, and `null_hypothesis` is a vector in (0,1).
If `null_hypothesis` is None it is set to the mean of x. If that mean is 0 or 1 then
the power spectrum returned is (0,1,1,1,...).
Parameters
----------
x: array
The time-series data to convert into a power spectrum
times: array, optional
The times associated with the data in `x`. This is not optional for the `lsp` transform
null_hypothesis: None or array, optional
Used to normalize the data, and should be the null hypothesis that is being tested for
the probability trajectory from which `x` is drawn. If `null_hypothesis` is None it is
set to the mean of x.
counts: int, optional
The number of counts per time-step, whereby all values of `x` are within [0,counts].
In the main usages for drift detection, `x` is the clickstream for a single measurement
outcome -- so `x` contains integers between 0 and the number of measurements at a (perhaps
coarse-grained) time. `counts` is this number of measurements per time.
frequencies: 'auto' or array, optional
The frequencies to generate the power spectrum for. Only relevant for transform=`lsp`.
transform: 'dct', 'dft' or 'lsp', optional
The transform to use to generate power spectrum. 'dct' is the Type-II discrete cosine transform
with an orthogonal normalization; 'dft' is the discrete Fourier transform with a unitary
        normalization; 'lsp' is the floating-mean Lomb-Scargle periodogram with an orthogonal-like
normalization.
returnfrequencies: bool, optional
        Whether to return the frequencies corresponding to the powers
Returns
-------
if returnfrequencies:
array or None
The frequencies corresponding to the power spectrum. None is returned if the frequencies
cannot be ascertained (when `times` is not specified).
array or None
The amplitudes, that are squared to obtain the powers. None is returned when the transform
does not generate amplitudes (this is the case for `lsp`)
array
The power spectrum
"""
if transform == 'dct' or transform == 'dft':
if transform == 'dct':
modes = dct(x, null_hypothesis, counts)
powers = modes**2
elif transform == 'dft':
modes = dft(x, null_hypothesis, counts)
powers = _np.abs(modes)**2
if returnfrequencies:
if isinstance(frequencies, str):
if times is None: freqs = None
else: freqs = fourier_frequencies_from_times(times)
else:
freqs = frequencies
return freqs, modes, powers
else:
return modes, powers
elif transform == 'lsp':
freqs, powers = lsp(x, times, frequencies, null_hypothesis, counts)
modes = None
if returnfrequencies:
return freqs, modes, powers
else:
return modes, powers
else:
raise ValueError("Input `transform` type invalid!")
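# Hedged usage sketch (relies on the module's dct helper and numpy as _np,
# both referenced above): power spectrum of a fair-coin clickstream.
# >>> x = _np.random.binomial(1, 0.5, size=600)
# >>> times = _np.arange(600) * 0.01
# >>> freqs, modes, powers = spectrum(x, times=times, transform='dct')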
|
5a847f75eaa3fda0bc2906e14d56f7870da1edfa
| 3,648,328
|
def CalculatePercentIdentity(pair, gap_char="-"):
"""return number of idential and transitions/transversions substitutions
in the alignment.
"""
transitions = ("AG", "GA", "CT", "TC")
transversions = ("AT", "TA", "GT", "TG", "GC", "CG", "AC", "CA")
nidentical = 0
naligned = 0
ndifferent = 0
ntransitions = 0
ntransversions = 0
nunaligned = 0
for x in range(min(len(pair.mAlignedSequence1), len(pair.mAlignedSequence2))):
if pair.mAlignedSequence1[x] != gap_char and \
pair.mAlignedSequence2[x] != gap_char:
naligned += 1
if pair.mAlignedSequence1[x] == pair.mAlignedSequence2[x]:
nidentical += 1
else:
ndifferent += 1
if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transitions:
ntransitions += 1
if (pair.mAlignedSequence1[x] + pair.mAlignedSequence2[x]) in transversions:
ntransversions += 1
else:
nunaligned += 1
return nidentical, ntransitions, ntransversions, naligned, nunaligned
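# Hedged usage sketch: the real alignment-pair class is not shown here, so a
# minimal stand-in exposing the two attributes used above is assumed.
# >>> from collections import namedtuple
# >>> Pair = namedtuple('Pair', ['mAlignedSequence1', 'mAlignedSequence2'])
# >>> CalculatePercentIdentity(Pair('ACGT-A', 'ACTTCA'))
# (4, 0, 1, 5, 1)    # identical, transitions, transversions, aligned, unaligned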
|
84d67754d9f63eaee5a172425ffb8397c3b5a7ff
| 3,648,329
|
def render_ellipse(center_x, center_y, covariance_matrix, distance_square):
"""
Renders a Bokeh Ellipse object given the ellipse center point, covariance, and distance square
:param center_x: x-coordinate of ellipse center
:param center_y: y-coordinate of ellipse center
:param covariance_matrix: NumPy array containing the covariance matrix of the ellipse
:param distance_square: value for distance square of ellipse
:return: Bokeh Ellipse object
"""
values, vectors = np.linalg.eigh(covariance_matrix)
order = values.argsort()[::-1]
values = values[order]
vectors = vectors[:, order]
angle_rads = np.arctan2(*vectors[:, 0][::-1])
# Width and height are full width (the axes lengths are thus multiplied by 2.0 here)
width, height = 2.0 * np.sqrt(values * distance_square)
ellipse = Ellipse(
x=center_x,
y=center_y,
width=width,
height=height,
angle=angle_rads,
line_width=line_width,
line_color=line_color,
fill_color=fill_color,
fill_alpha=fill_alpha
)
return ellipse
|
8f26a9a41a8f179f87925f0a931fbc81d2d8549b
| 3,648,330
|
def flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u / (maxrad + np.finfo(float).eps)
v = v / (maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out))
|
301ef598b2e6aeda2e2f673854850faf0409e0e8
| 3,648,331
|
def joint_dataset(l1, l2):
"""
Create a joint dataset for two non-negative integer (boolean) arrays.
Works best for integer arrays with values [0,N) and [0,M) respectively.
This function will create an array with values [0,N*M), each value
representing a possible combination of values from l1 and l2. Essentially,
this is equivalent to zipping l1 and l2, but much faster by using the NumPy
native implementations of elementwise addition and multiplication.
:param l1: first integer vector (values within 0-n)
:type l1: numpy.array or similar
:param l2: second integer vector (values with 0-m)
:type l2: numpy.array or similar
:returns: integer vector expressing states of both l1 and l2
"""
N = np.max(l1) + 1
return l2 * N + l1
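# Example (assumes numpy imported as np, as used above): l1 has 2 states and
# l2 has 3, so the joint code is l2 * 2 + l1 with values in [0, 6).
# >>> joint_dataset(np.array([0, 1, 0, 1]), np.array([0, 0, 2, 2]))
# array([0, 1, 4, 5])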
|
6ba767739793f7c188d56e24e6e07d6e594c775e
| 3,648,332
|
import uuid
def parse(asset, image_data, product):
""" Parses the GEE metadata for ODC use.
Args:
asset (str): the asset ID of the product in the GEE catalog.
image_data (dict): the image metadata to parse.
product (datacube.model.DatasetType): the product information from the ODC index.
Returns: a namedtuple of the data required by ODC for indexing.
"""
bands = tuple(zip(product.measurements, image_data['bands']))
_id = str(uuid.uuid5(uuid.NAMESPACE_URL, f'EEDAI:{product.name}/{image_data["name"]}'))
creation_dt = image_data['startTime']
spatial_reference = image_data['bands'][0]['grid']\
.get('crsCode', image_data['bands'][0]['grid'].get('crsWkt'))
# Handle special GEE Infinity GeoJSON responses
image_data['geometry']['coordinates'][0] = [[float(x), float(y)]
for (x, y) \
in image_data['geometry']['coordinates'][0]]
geometry = Geometry(image_data['geometry'])
grids = [band['grid'] for band in image_data['bands']]
grids_copy = grids.copy()
grids = list(filter(lambda grid:
grids_copy.pop(grids_copy.index(grid)) \
not in grids_copy, grids))
shapes = [[grid['dimensions']['height'], grid['dimensions']['width']] \
for grid in grids]
affine_values = [list(grid['affineTransform'].values()) \
for grid in grids]
transforms = [list(Affine(affine_value[0], 0, affine_value[1],
affine_value[2], 0, affine_value[3]))\
for affine_value in affine_values]
metadata = Metadata(id=_id,
product=product.name,
creation_dt=creation_dt,
format='GeoTIFF',
platform=product.metadata_doc['properties'].get('eo:platform'),
instrument=product.metadata_doc['properties'].get('eo:instrument'),
from_dt=creation_dt,
to_dt=creation_dt,
center_dt=creation_dt,
asset=asset,
geometry=geometry,
shapes=shapes,
transforms=transforms,
grids=grids,
spatial_reference=spatial_reference,
path=f'EEDAI:{image_data["name"]}:',
bands=bands)
return metadata
|
815da17849b240a291332a695ca38374bb957d8a
| 3,648,333
|
def scale(pix, pixMax, floatMin, floatMax):
""" scale takes in
pix, the CURRENT pixel column (or row)
pixMax, the total # of pixel columns
floatMin, the min floating-point value
floatMax, the max floating-point value
scale returns the floating-point value that
corresponds to pix
"""
return (pix / pixMax) * (floatMax - floatMin) + floatMin
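# Example: map pixel column 250 of 500 onto the floating-point range
# [-2.0, 1.0], the kind of mapping used when rendering an image column by
# column.
# >>> scale(250, 500, -2.0, 1.0)
# -0.5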
|
455d0233cbeeafd53c30baa4584dbdac8502ef94
| 3,648,334
|
def most_distinct(df):
"""
    :param df: input data frame
    :return: index of the column with the most distinct non-numeric values, or -1 if no column qualifies
"""
headers = df.columns.values
dist_list = [] # list of distinct values per list
for idx, col_name in enumerate(headers):
col = df[col_name]
col_list = col.tolist()
# if len(col_list) == 0:
# dist_list.append(-1)
# continue
avg_token_size = sum([len(str(a)) for a in col_list]) * 1.0 / len(col_list)
if avg_token_size < 4:
dist_list.append(-1)
else:
nums = get_numerics_from_list(col_list)
if nums is None:
dist_list.append(len(set(col_list)))
else:
dist_list.append(-1)
max_num = max(dist_list)
if max_num == -1 or max_num == 0:
return -1
for i, c in enumerate(dist_list):
if c == max_num:
return i
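# Hedged usage sketch (assumes pandas imported as pd and the module's
# get_numerics_from_list helper, taken to return None for non-numeric
# columns):
# >>> df = pd.DataFrame({'city': ['Amsterdam', 'Berlin', 'Berlin'],
# ...                    'pop': [872757, 3645000, 3645000]})
# >>> most_distinct(df)   # expected: 0, the index of the 'city' column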
|
f21ba5ffd2bfcf5262ffbbe30f24a77522a10bb0
| 3,648,335
|
def make_set(value):
"""
Takes a value and turns it into a set
!!!! This is important because set(string) will parse a string to
individual characters vs. adding the string as an element of
the set i.e.
x = 'setvalue'
set(x) = {'t', 'a', 'e', 'v', 'u', 's', 'l'}
make_set(x) = {'setvalue'}
or use set([x,]) by adding string as first item in list.
:param value:
:return:
"""
if isinstance(value, list):
value = set(value)
elif not isinstance(value, set):
value = set([value])
return value
|
c811729ea83dc1fbff7c76c8b596e26153aa68ee
| 3,648,336
|
def _get_parent_cache_dir_url():
"""Get parent cache dir url from `petastorm.spark.converter.parentCacheDirUrl`
We can only set the url config once.
"""
global _parent_cache_dir_url # pylint: disable=global-statement
conf_url = _get_spark_session().conf \
.get(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, None)
if conf_url is None:
raise ValueError(
"Please set the spark config {}.".format(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF))
conf_url = normalize_dir_url(conf_url)
_check_parent_cache_dir_url(conf_url)
_parent_cache_dir_url = conf_url
logger.info(
'Read %s %s', SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, _parent_cache_dir_url)
return _parent_cache_dir_url
|
34abb96b64ab5338a6c9a5ef700a6fbb00f3905f
| 3,648,337
|
def make_variable(data, variances=None, **kwargs):
"""
Make a Variable with default dimensions from data
while avoiding copies beyond what sc.Variable does.
"""
if isinstance(data, (list, tuple)):
data = np.array(data)
if variances is not None and isinstance(variances, (list, tuple)):
variances = np.array(variances)
if isinstance(data, np.ndarray):
dims = ['x', 'y'][:np.ndim(data)]
return sc.array(dims=dims, values=data, variances=variances, **kwargs)
return sc.scalar(data, **kwargs)
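# Hedged usage sketch (assumes scipp imported as sc and numpy as np at module
# level, as used above):
# >>> make_variable([1.0, 2.0, 3.0], variances=[0.1, 0.1, 0.1], unit='m')
# # -> 1-D Variable with dims ['x']
# >>> make_variable(5.0)
# # -> 0-D scalar Variable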
|
a712800df05c8c8f5f968a0fee6127919ae56d8f
| 3,648,338
|
def dt642epoch(dt64):
"""
Convert numpy.datetime64 array to epoch time
(seconds since 1/1/1970 00:00:00)
Parameters
----------
dt64 : numpy.datetime64
Single or array of datetime64 object(s)
Returns
-------
time : float
Epoch time (seconds since 1/1/1970 00:00:00)
"""
return dt64.astype('datetime64[ns]').astype('float') / 1e9
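# Example (assumes numpy imported as np): 2000-01-01T00:00:00 falls
# 946684800 seconds after the Unix epoch.
# >>> dt642epoch(np.datetime64('2000-01-01T00:00:00'))
# 946684800.0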
|
f7cdaf44312cb0564bf57393a5fde727bc24e566
| 3,648,339
|