| content (string, len 35 to 762k) | sha1 (string, len 40) | id (int64, 0 to 3.66M) |
|---|---|---|
import pandas as pd

def get_model_data(n_samples=None, ratio=None):
    """
    Provides train and validation data to train the model. If n_samples and
    ratio are not None, it returns data according to the ratio between v1 and v2.
    V1 is data coming from the original distribution of SIRD parameters, and
    V2 is data coming from distributions based on errors of trained ML models.
Parameters
----------
n_samples : int, optional
Subset of samples from the original set. The default is None.
ratio : float, optional
        Ratio of the data from the distribution based on errors. The default is None.
Returns
-------
df_train_val : pandas.DataFrame
"""
df_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set.pickle")
df_train_val_rev = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_rev.pickle")
df_v1_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_v1.pickle")
df_v2_train_val = pd.read_pickle(
f"{root_project}/data/processed/train_val_set_v2.pickle")
if n_samples is not None and ratio is not None:
df_train_val = take_samples(df_v1_train_val,
df_v2_train_val,
n_samples,
ratio)
return df_train_val
elif n_samples is not None:
df_train_val = df_train_val.sample(n_samples, random_state=42)
return df_train_val
else:
return df_train_val_rev
|
540706ca37decbf718fcedeef30beb235e98ded8
| 3,641,445
|
def skip():
""" Decorator for marking test function that should not be executed."""
def wrapper(fn):
fn.__status__ = "skip"
return fn
return wrapper
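
# Hedged usage sketch (hypothetical test): a runner that inspects __status__
# can use the marker to skip the function. Note `skip()` is called, since it
# returns the actual decorator.
@skip()
def test_not_ready():
    assert 1 + 1 == 2

assert test_not_ready.__status__ == "skip"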
|
0b966c306515073bfb52427b78c65822ee09a060
| 3,641,446
|
def _GetThumbnailType(destination_id):
"""Returns the thumbnail type for the destination with the id."""
destination_type = _GetDestinationType(destination_id)
if destination_type == _DestinationType.HOTSPOT:
return _ThumbnailType.PRETTY_EARTH
else:
return _ThumbnailType.GEOMETRY_OUTLINE
|
3452044aae2f9660084be46d840747089f271b1b
| 3,641,448
|
async def postAsync(text: str, *, url: str = "auto", config: ConfigOptions = ConfigOptions(), timeout: float = 30.0,
retries: int = 3):
"""Alias function for AsyncHaste().post(...)"""
return await AsyncHaste().post(text, url=url, config=config, timeout=timeout, retries=retries)
|
bfa5460ac6f469c123eb1bef6e2430f2251809c9
| 3,641,449
|
from collections import OrderedDict
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
GPU DataFrame
"""
chronometer = Chronometer.makeStarted()
cols = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator'
]
dtypes = OrderedDict([
("loan_id", "int64"),
("orig_channel", "category"),
("seller_name", "category"),
("orig_interest_rate", "float64"),
("orig_upb", "int64"),
("orig_loan_term", "int64"),
("orig_date", "date"),
("first_pay_date", "date"),
("orig_ltv", "float64"),
("orig_cltv", "float64"),
("num_borrowers", "float64"),
("dti", "float64"),
("borrower_credit_score", "float64"),
("first_home_buyer", "category"),
("loan_purpose", "category"),
("property_type", "category"),
("num_units", "int64"),
("occupancy_status", "category"),
("property_state", "category"),
("zip", "int64"),
("mortgage_insurance_percent", "float64"),
("product_type", "category"),
("coborrow_credit_score", "float64"),
("mortgage_insurance_type", "float64"),
("relocation_mortgage_indicator", "category")
])
print(acquisition_path)
acquisition_table = pyblazing.create_table(table_name='acq', type=get_type_schema(acquisition_path), path=acquisition_path, delimiter='|', names=cols, dtypes=get_dtype_values(dtypes), skip_rows=1)
Chronometer.show(chronometer, 'Read Acquisition CSV')
return acquisition_table
|
fdc2281a6bc31547f60c9c8d8585cdc1d101d88f
| 3,641,450
|
def get_flows_src_dst_address_pairs(device, flow_monitor):
""" Gets flows under flow_monitor and returns source and destination address pairs
Args:
device ('obj'): Device to use
flow_monitor ('str'): Flow monitor name
Raises:
N/A
Returns:
[('source_address', 'destination_address'), ...]
"""
log.info('Getting all source and destination address pairs under flow monitor {name}'
.format(name=flow_monitor))
try:
output = device.parse('show flow monitor {name} cache format table'
.format(name=flow_monitor))
except SchemaEmptyParserError:
return []
pairs = []
# All hardcoded keys are mandatory in the parser
for src in output.get('ipv4_src_addr', {}):
for dst in output['ipv4_src_addr'][src]['ipv4_dst_addr']:
pairs.append((src, dst))
return pairs
|
61ffbe3e0e81acf8c408df7b5ca0f8ff9519f87b
| 3,641,451
|
def imthresh(im, thresh):
"""
Sets pixels in image below threshold value to 0
Args:
im (ndarray): image
thresh (float): threshold
Returns:
ndarray: thresholded image
"""
thresh_im = im.copy()
thresh_im[thresh_im < thresh] = 0
return thresh_im
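
# Illustrative example (assumes numpy): values below the threshold are zeroed;
# the input array itself is left unmodified.
import numpy as np

im = np.array([[1.0, 5.0], [3.0, 0.5]])
print(imthresh(im, 2.0))   # [[0. 5.] [3. 0.]]
print(im[0, 0])            # 1.0 -- original untouched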
|
180dc1eba6320c21273e50e4cf7b3f28c786b839
| 3,641,452
|
import _winreg
def set_serv_parms(service, args):
""" Set the service command line parameters in Registry """
uargs = []
for arg in args:
uargs.append(unicoder(arg))
try:
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, _SERVICE_KEY + service)
_winreg.SetValueEx(key, _SERVICE_PARM, None, _winreg.REG_MULTI_SZ, uargs)
_winreg.CloseKey(key)
except WindowsError:
return False
return True
|
90eb2ac8ea6e9a11b7ff8c266017fe027503f159
| 3,641,453
|
def isRenderNode():
# type: () -> bool
"""
Returns
-------
bool
"""
return flavor() == 'Render'
|
b0d5799f755c9c6a72f851ad325b4d8ddf3dec70
| 3,641,454
|
def test_clean_data_contains_instance_value():
"""
Test values from instances remain when not in data.
"""
data = {'first_name': 'John'}
fields = ['job_title', 'first_name']
class Job(object):
job_title = 'swamper'
first_name = ''
class Swamper(BaseSwamper):
def build_instances(self):
obj = Job()
self.instances = {}
self.instances[Job] = obj
def clean_instances(self):
"""
`clean` depends on having both job_title and first_name, so provide
the data which isn't available from self.data by copying it from
an instance to self.cleaned_data.
"""
if self.instances:
for model, instance in self.instances.items():
# Update cleaned_data with fields from instance that aren't
# part of the new data.
initial_fields = set(fields) - set(self.data.keys())
obj_data = {field: getattr(instance, field) for field in initial_fields}
self.cleaned_data.update(obj_data)
def clean(self):
if self.cleaned_data['job_title'] == 'swamper':
if not self.cleaned_data['first_name'].startswith('J'):
                    raise ValueError('Only people with a first name that begins '
                                     'with the letter J can become swampers.')
return self.cleaned_data
swamper = Swamper(fields, data)
assert swamper.errors == {}
assert swamper.cleaned_data['job_title'] == 'swamper'
assert swamper.cleaned_data['first_name'] == 'John'
obj = Job()
obj = swamper.build_or_update(obj, fields)
assert obj.job_title == 'swamper'
assert obj.first_name == 'John'
|
d2c310cfd760ddea7241ccb469ae7cffdaf373ea
| 3,641,455
|
def wrap_application(app: App, wsgi: WSGICallable) -> WSGICallable:
"""Wrap a given WSGI callable in all active middleware."""
for middleware_instance in reversed(ACTIVE_MIDDLEWARES):
wsgi = middleware_instance(app, wsgi)
return wsgi
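
# Hedged sketch with stand-in middleware (the real App/WSGICallable types come
# from the surrounding module; everything below is hypothetical). reversed()
# makes ACTIVE_MIDDLEWARES[0] the outermost wrapper, so it runs first.
ACTIVE_MIDDLEWARES = [
    lambda app, wsgi: (lambda env, sr: ["outer"] + wsgi(env, sr)),
    lambda app, wsgi: (lambda env, sr: ["inner"] + wsgi(env, sr)),
]
wrapped = wrap_application(None, lambda env, sr: ["app"])
print(wrapped({}, None))  # ['outer', 'inner', 'app']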
|
09574d87e241c19cae30c2db29ee1ed4744a0c68
| 3,641,456
|
import numpy as np

def cal_rpn(imgsize, featuresize, scale, gtboxes):
    """
    Args:
        imgsize: [h, w]
        featuresize: the size of each output feature map, e.g. [19, 19]
        scale: the scale factor of the base anchor to the feature map, e.g. [32, 32]
        gtboxes: ground truth boxes in the image, shape of [N, 4].
    Returns:
        labels: label for each anchor, shape of [N, ], -1 for ignore, 0 for background, 1 for object
        bbox_targets: bbox regression target for each anchor, shape of [N, 4]
    """
imgh, imgw = imgsize
# gen base anchor
base_anchor = gen_anchor(featuresize, scale)
# calculate iou
overlaps = cal_overlaps(base_anchor, gtboxes)
# init labels -1 don't care 0 is negative 1 is positive
labels = np.empty(base_anchor.shape[0])
labels.fill(-1)
# for each GT box corresponds to an anchor which has highest IOU
gt_argmax_overlaps = overlaps.argmax(axis=0)
# the anchor with the highest IOU overlap with a GT box
anchor_argmax_overlaps = overlaps.argmax(axis=1)
anchor_max_overlaps = overlaps[range(overlaps.shape[0]), anchor_argmax_overlaps]
# IOU > IOU_POSITIVE
labels[anchor_max_overlaps > config.IOU_POSITIVE] = 1
# IOU <IOU_NEGATIVE
labels[anchor_max_overlaps < config.IOU_NEGATIVE] = 0
# ensure that every GT box has at least one positive RPN region
labels[gt_argmax_overlaps] = 1
# only keep anchors inside the image
outside_anchor = np.where(
(base_anchor[:, 0] < 0)
| (base_anchor[:, 1] < 0)
| (base_anchor[:, 2] >= imgw)
| (base_anchor[:, 3] >= imgh)
)[0]
labels[outside_anchor] = -1
    # subsample positive labels, if greater than RPN_POSITIVE_NUM (default 128)
fg_index = np.where(labels == 1)[0]
# print(len(fg_index))
if len(fg_index) > config.RPN_POSITIVE_NUM:
labels[
np.random.choice(
fg_index, len(fg_index) - config.RPN_POSITIVE_NUM, replace=False
)
] = -1
# subsample negative labels
if not config.OHEM:
bg_index = np.where(labels == 0)[0]
num_bg = config.RPN_TOTAL_NUM - np.sum(labels == 1)
if len(bg_index) > num_bg:
# print('bgindex:',len(bg_index),'num_bg',num_bg)
labels[
np.random.choice(bg_index, len(bg_index) - num_bg, replace=False)
] = -1
bbox_targets = bbox_transfrom(base_anchor, gtboxes[anchor_argmax_overlaps, :])
return [labels, bbox_targets], base_anchor
|
19178b125024d213808a497a005336678589588f
| 3,641,457
|
def run(args):
"""This function is called by a user to recover or reset their primary
one-time-password secret. This is used, e.g. if a user has changed
their phone, or if they think the secret has been compromised, or
if they have lost the secret completely (in which case they will
need to log in using a backup method and then call this function
from that login)
The user will need to pass in a validated Authorisation, meaning
they must have a login by at least one method (e.g. a pre-approved
device or a one-time-login requested via backup codes or via
an admin-authorised login)
"""
auth = Authorisation.from_data(args["authorisation"])
    try:
        reset_otp = bool(args["reset_otp"])
    except (KeyError, TypeError):
        reset_otp = False
auth.verify(resource="reset_otp")
identity_uid = auth.identity_uid()
service = get_this_service(need_private_access=True)
if service.uid() != identity_uid:
raise PermissionError(
"You can only reset the OTP on the identity service on "
"which the user is registered! %s != %s" %
(service.uid(), identity_uid))
user_uid = auth.user_uid()
return (user_uid, reset_otp)
|
c81e533c7dead3fcda028ef85e566d290a85ec74
| 3,641,458
|
import time
def adjust_price(iteration, current_price, global_start, last_tx_time):
""" Function that decides to lower or increase the price, according to the
time of previous transaction and the progress in reaching TARGET in
TARGET_TIME.
Args:
iteration (int) - Number of previous successful transactions. Iterator
which changes with the changing of nonce;
current_price (int) - Current gas price in Wei;
global_start (float/Unix format) - The start of the whole process;
last_tx_time (float/Unix format) - Time spent in previous iteration.
    Returns:
current_price (int) - New gas price after adjustments.
"""
if iteration > 0:
target_ratio = TARGET_TIME / TARGET
actual_ratio = (time.time() - global_start) / iteration
# If we check only the duration of the latest tx, it will increase
# the price very rapidly, ignoring the global progress.
# So it is necessary to control the price according to plan.
if actual_ratio < target_ratio:
current_price -= int(current_price / 10)
elif last_tx_time >= target_ratio:
current_price += int(current_price / 10)
return current_price
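
# Hedged numeric sketch: TARGET and TARGET_TIME are module-level constants in
# the original code; the values below are hypothetical stand-ins.
TARGET, TARGET_TIME = 100, 1000.0     # aim for 100 txs in 1000 s
global_start = time.time() - 50       # 10 txs done in only 50 s: ahead of plan
new_price = adjust_price(10, 1_000_000_000, global_start, last_tx_time=3.0)
print(new_price)  # 900000000: actual_ratio 5.0 < target_ratio 10.0, so -10%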
|
f27f13e7b4a753d6b912ed1d795383f0d206b2ef
| 3,641,459
|
from copy import copy
import pandas as pd

def read_csv_batch(file: str, offset, cnt, **read_csv_params):
    """
    Args:
        file: Path to the CSV file.
        offset: Byte offset at which to start reading the batch.
        cnt: Number of rows to read.
        read_csv_params: Extra keyword arguments passed to pandas.read_csv.
    Returns:
        A DataFrame with the requested batch of rows.
    """
    # **kwargs always arrives as a dict (never None), so just copy it so that
    # popping 'usecols' below does not mutate the caller's dict.
    read_csv_params = copy(read_csv_params)
try:
usecols = read_csv_params.pop('usecols')
except KeyError:
usecols = None
header = pd.read_csv(file, nrows=0, **read_csv_params).columns
with open(file, 'rb') as f:
f.seek(offset)
data = pd.read_csv(f, header=None, names=header, chunksize=None, nrows=cnt, usecols=usecols, **read_csv_params)
return data
|
cc6699db5b9ecae9706d52768c8a1dcd084062ea
| 3,641,460
|
def fault_ack_faults_by_dn(cookie, in_dns):
""" Auto-generated UCSC XML API Method. """
method = ExternalMethod("FaultAckFaultsByDn")
method.cookie = cookie
method.in_dns = in_dns
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request
|
532e925b560d02a0ed47d61f9aa721f55fc6b650
| 3,641,461
|
from typing import List
def provides(name=None, needs: List[str] = None):
"""A shortcut for defining a factory function that also needs dependencies
itself."""
if not needs:
needs = []
    def decorator(f):
        decorated = _needs(*needs)(f)
        # `set` here is the module's registration helper (it stores the factory
        # under the given name), not the Python builtin.
        set(name or f.__name__, decorated)
        return f
return decorator
|
e28e8d5690b7fa53907864c6d17e199a491ccada
| 3,641,462
|
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
"""Clip coordinates to an image with the given height and width."""
x1 = np.minimum(width - 1.0, np.maximum(0.0, x1))
y1 = np.minimum(height - 1.0, np.maximum(0.0, y1))
x2 = np.minimum(width - 1.0, np.maximum(0.0, x2))
y2 = np.minimum(height - 1.0, np.maximum(0.0, y2))
return x1, y1, x2, y2
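
# Illustrative example (assumes numpy): corners are clamped to the valid
# pixel range [0, width-1] x [0, height-1].
import numpy as np

print(clip_xyxy_to_image(-5.0, 10.0, 120.0, 90.0, height=100, width=100))
# -> (0.0, 10.0, 99.0, 90.0) (as numpy scalars)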
|
cf0fe5269afe2a5cbe94efb3221184f706fcb59d
| 3,641,463
|
import re
from urllib.parse import urlunparse

def build_url(urlo, base, end, url_whitespace, url_case):
    """ Build and return a valid url.
    Parameters
    ----------
    urlo : ParseResult
        A ParseResult object returned by urlparse
    base : str
        base_url from config
    end : str
        end_url from config
    url_whitespace : str
        url_whitespace from config
    url_case : str
        url_case from config
    Returns
    -------
    URL string
    """
if not urlo.netloc:
if not end:
clean_target = re.sub(r'\s+', url_whitespace, urlo.path)
else:
clean_target = re.sub(r'\s+', url_whitespace, urlo.path.rstrip('/'))
if clean_target.endswith(end):
end = ''
if base.endswith('/'):
path = "%s%s%s" % (base, clean_target.lstrip('/'), end)
elif base and not clean_target.startswith('/'):
path = "%s/%s%s" % (base, clean_target, end)
else:
path = "%s%s%s" % (base, clean_target, end)
if url_case == 'lowercase':
urlo = urlo._replace(path=path.lower() )
elif url_case == 'uppercase':
urlo = urlo._replace(path=path.upper() )
else:
urlo = urlo._replace(path=path)
return urlunparse(urlo)
|
b6fa39062502a7d862b17cd079de3c4cfa3720c4
| 3,641,464
|
import re
def remove_links(txt: str):
"""
Remove weblinks from the text
"""
pattern = r'[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
txt = re.sub(pattern, " ", txt)
    txt = re.sub(r'https?', " ", txt)  # 'http|https' would leave the trailing 's' of 'https'
return txt
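
# Illustrative example: domain-like tokens are blanked out; note that
# whitespace is not collapsed afterwards.
print(remove_links("visit example.com for more info"))
# -> 'visit   for more info'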
|
4ccaae84d12ab47e70482d15100ba2e60ef476e8
| 3,641,466
|
from functools import wraps
from timeit import default_timer as timer

def timefn(fn):
    """Times a function and appends the result to gv.TIME_LOG."""
@wraps(fn)
def inside(*args, **kwargs):
start = timer()
result = fn(*args, **kwargs)
end = timer()
gv.TIME_LOG += f'Fn : {fn.__name__} - {end - start}\n'
return result
return inside
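
# Hedged usage sketch: `gv` is the project's globals module; a stand-in with a
# TIME_LOG string attribute is enough to exercise the decorator here.
class gv:  # hypothetical stand-in for the real `gv` module
    TIME_LOG = ''

@timefn
def slow_add(a, b):
    return a + b

slow_add(1, 2)
print(gv.TIME_LOG)  # e.g. "Fn : slow_add - 2.1e-06\n"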
|
8dddcd54489d2fb754d9c4de7bc1b084a10840e2
| 3,641,467
|
def get_tweet_stream(output_file, twitter_credentials):
"""
    This function is given and returns a "stream" to listen to tweets and store them in output_file.
    To understand how this function works, check it against the code of twitter_streaming in part00_preclass.
    :param output_file: the file where the returned stream will store tweets
    :param twitter_credentials: a dictionary containing the credentials to access twitter (you should have created your own!)
:return: a "stream" variable to track live tweets
"""
access_token = twitter_credentials['access_token']
access_token_secret = twitter_credentials['access_token_secret']
consumer_key = twitter_credentials['consumer_key']
consumer_secret = twitter_credentials['consumer_secret']
    listener = TweetToFileListener()
    listener.set_output_file(output_file)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, listener)
return stream
|
71a336b71760ef74e14b6472cb8f2a8510d9acb3
| 3,641,468
|
from tensorflow import keras
from tensorflow.keras import layers

def conv3d_3x3(filters,
stride=1,
padding=1,
kernel_initializer=None,
bias_initializer=None,
name=None):
"""3D convolution with padding."""
return keras.Sequential([
layers.ZeroPadding3D(padding),
layers.Conv3D(filters,
kernel_size=3,
strides=stride,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
padding='valid')
], name=name)
|
f8035b8be1bf82385c31aa0810e7addd8027b5cd
| 3,641,469
|
def get_available_quests(user, num_quests):
"""Get the quests the user could participate in."""
quests = []
for quest in Quest.objects.exclude(questmember__user=user).order_by('priority'):
if quest.can_add_quest(user) and not quest.completed_quest(user):
quests.append(quest)
if len(quests) == num_quests:
return quests
return quests
|
d1bdbe96dbd0b7fd5295ec43153249e7b93c7339
| 3,641,470
|
def height():
""" Default window height """
return get_default_height()
|
fab02ec1881d1c2ccda9f59e6a72d2990d815973
| 3,641,471
|
def no_adjust_tp_func_nb(c: AdjustTPContext, *args) -> float:
"""Placeholder function that returns the initial take-profit value."""
return c.curr_stop
|
6896b72c20c79156c97ba09ba87a1947c7df04d6
| 3,641,472
|
import numpy as np

def inverse_theoretical_laser_position(y, a, b, c):
    """
    Theoretical angular position of the wire with respect to the laser position.
"""
return np.pi - a - np.arccos((b - y) / c)
|
0ba87442954fd3bc832edec9adc082e1d2448347
| 3,641,473
|
import sklearn.metrics

def ad_roc(y_true, y_score):
    """ Compute ROC-curve.
"""
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score, pos_label=1, drop_intermediate=False)
return fpr, tpr, thresholds
|
db161f7099aab4e1d266ab2b0c8aac0b96076a49
| 3,641,474
|
import torch
def greedy_decoding(baseline_transformer, src_representations_batch, src_mask, trg_field_processor, max_target_tokens=100):
"""
Supports batch (decode multiple source sentences) greedy decoding.
Decoding could be further optimized to cache old token activations because they can't look ahead and so
adding a newly predicted token won't change old token's activations.
    Example: we input <s> and do a forward pass. We get intermediate activations for <s>, and at output position
    0, after applying the linear layer, we get e.g. token <I>. Now we input <s>,<I> but <s>'s activations will remain
    the same. Similarly, say we now got <am> at output position 1; in the next step we input <s>,<I>,<am>, and so <I>'s
    activations will remain the same, as it only looks at/attends to itself and to <s>, and so forth.
"""
device = next(baseline_transformer.parameters()).device
pad_token_id = trg_field_processor.vocab.stoi[PAD_TOKEN]
# Initial prompt is the beginning/start of the sentence token. Make it compatible shape with source batch => (B,1)
target_sentences_tokens = [[BOS_TOKEN] for _ in range(src_representations_batch.shape[0])]
trg_token_ids_batch = torch.tensor([[trg_field_processor.vocab.stoi[tokens[0]]] for tokens in target_sentences_tokens], device=device)
# Set to true for a particular target sentence once it reaches the EOS (end-of-sentence) token
is_decoded = [False] * src_representations_batch.shape[0]
while True:
trg_mask, _ = get_masks_and_count_tokens_trg(trg_token_ids_batch, pad_token_id)
# Shape = (B*T, V) where T is the current token-sequence length and V target vocab size
predicted_log_distributions = baseline_transformer.decode(trg_token_ids_batch, src_representations_batch, trg_mask, src_mask)
# Extract only the indices of last token for every target sentence (we take every T-th token)
num_of_trg_tokens = len(target_sentences_tokens[0])
predicted_log_distributions = predicted_log_distributions[num_of_trg_tokens-1::num_of_trg_tokens]
# This is the "greedy" part of the greedy decoding:
# We find indices of the highest probability target tokens and discard every other possibility
most_probable_last_token_indices = torch.argmax(predicted_log_distributions, dim=-1).cpu().numpy()
# Find target tokens associated with these indices
predicted_words = [trg_field_processor.vocab.itos[index] for index in most_probable_last_token_indices]
for idx, predicted_word in enumerate(predicted_words):
target_sentences_tokens[idx].append(predicted_word)
if predicted_word == EOS_TOKEN: # once we find EOS token for a particular sentence we flag it
is_decoded[idx] = True
if all(is_decoded) or num_of_trg_tokens == max_target_tokens:
break
# Prepare the input for the next iteration (merge old token ids with the new column of most probable token ids)
trg_token_ids_batch = torch.cat((trg_token_ids_batch, torch.unsqueeze(torch.tensor(most_probable_last_token_indices, device=device), 1)), 1)
# Post process the sentences - remove everything after the EOS token
target_sentences_tokens_post = []
for target_sentence_tokens in target_sentences_tokens:
        try:
            target_index = target_sentence_tokens.index(EOS_TOKEN) + 1
        except ValueError:  # no EOS token in this sentence: keep everything
            target_index = None
target_sentence_tokens = target_sentence_tokens[:target_index]
target_sentences_tokens_post.append(target_sentence_tokens)
return target_sentences_tokens_post
|
dbdf636979f28ea09b261fd3947068d6e4e359ad
| 3,641,476
|
def _parse_cell_type(cell_type_arg):
""" Convert the cell type representation to the expected JVM CellType object."""
def to_jvm(ct):
return _context_call('_parse_cell_type', ct)
if isinstance(cell_type_arg, str):
return to_jvm(cell_type_arg)
elif isinstance(cell_type_arg, CellType):
return to_jvm(cell_type_arg.cell_type_name)
|
1afa4b2ed28d08ebc3526b8462673e5aa7f8a47f
| 3,641,477
|
def Ht(mu, q=None, t=None, pi=None):
"""
Returns the symmetric Macdonald polynomial using the Haiman,
Haglund, and Loehr formula.
Note that if both `q` and `t` are specified, then they must have the
same parent.
REFERENCE:
- J. Haglund, M. Haiman, N. Loehr.
*A combinatorial formula for non-symmetric Macdonald polynomials*.
:arXiv:`math/0601693v3`.
EXAMPLES::
sage: from sage.combinat.sf.ns_macdonald import Ht
sage: HHt = SymmetricFunctions(QQ['q','t'].fraction_field()).macdonald().Ht()
sage: Ht([0,0,1])
x0 + x1 + x2
sage: HHt([1]).expand(3)
x0 + x1 + x2
sage: Ht([0,0,2])
x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
sage: HHt([2]).expand(3)
x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
"""
P, q, t, n, R, x = _check_muqt(mu, q, t, pi)
res = 0
for a in n:
weight = a.weight()
res += q**a.maj()*t**a.inv()*prod( x[i]**weight[i] for i in range(len(weight)) )
return res
|
d3a46458215417db0789d3601163e105c9712c75
| 3,641,478
|
def is_generic_alias_of(to_check, type_def):
"""
:param to_check: the type that is supposed to be a generic alias of ``type_def`` if this function returns ``True``.
:param type_def: the type that is supposed to be a generic version of ``to_check`` if this function returns \
``True``.
:return: ``True`` if ``to_check`` is a generic alias of ``type_def``, ``False`` otherwise.
"""
if isinstance(to_check, type) and issubclass(to_check, type_def):
return True
origin = getattr(to_check, "__origin__", None)
if origin is not None:
return issubclass(origin, type_def)
return False
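
# Illustrative check: a parameterized alias such as List[int] exposes list as
# its __origin__, so it is treated as a generic alias of list.
from typing import List

assert is_generic_alias_of(List[int], list)
assert is_generic_alias_of(list, list)        # plain subclass path
assert not is_generic_alias_of(List[int], dict)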
|
d09b255e9ff44a65565196dd6643564aea181433
| 3,641,479
|
def train_PCA(X, n_dims, model='pca'):
    """
    Linear dimensionality reduction using Singular Value Decomposition of the
    data to project it to a lower dimensional space.
    It uses the LAPACK implementation of the full SVD or a randomized truncated
    SVD by the method of Halko et al. 2009, depending on the shape of the input
    data and the number of components to extract.
    Returns: the fitted transformer model.
    """
    estimator = transformer[model].set_params(pca__n_components=n_dims)
    estimator.fit(X)
    return estimator
|
1909d154d778864c2eba0819e43a2bbcb260edbf
| 3,641,480
|
def phedex_url(api=''):
"""Return Phedex URL for given API name"""
return 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/%s' % api
|
a642cd138d9be4945dcbd924c7b5c9892de36baa
| 3,641,482
|
import csv
def extract_emails(fname, email='Email Address',
outfile="emails_from_mailchimp.txt",
nofile=False, nolog=False, sort=True):
"""
Extract e-mail addresses from a CSV-exported MailChimp list.
:param fname: the input .csv file
:param email: the header of the column containing all e-mail addresses
:param outfile: the .txt file the addresses will get written to
:param nofile: suppresses the creation of a text file if set to True
:param nolog: suppresses logging the addresses to stdout if set to True
:param sort: sorts e-mail addresses alphabetically if set to True
:return a list containing all e-mail addresses
"""
addresses = []
try:
        with open(fname, newline='') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',')
            # DictReader already consumes the header row; an extra next(reader)
            # here would silently drop the first address.
            for item in reader:
try:
addresses.append(item[email])
except KeyError:
log.error("The provided CSV file does not contain "
"the header \"{}\".\n"
"Please provide the correct header name "
"for the column containing e-mail "
"addresses.".format(email))
return
except FileNotFoundError:
log.error("The input file is not available. "
"Please provide a valid path.")
except IsADirectoryError:
log.error("The input file is not a CSV file but a directory.")
except StopIteration:
log.error("The input file cannot be read. "
"Please provide a valid CSV file.")
if sort:
addresses.sort()
if not nolog:
for address in addresses:
log.info(address)
if not nofile:
try:
with open(outfile, 'w') as txtfile:
for address in addresses:
txtfile.write(address + '\n')
except FileNotFoundError:
log.error("The file you are trying to write to "
"does not exist.")
except PermissionError:
log.error("You do not have permission to write to the file "
"whose path you provided.")
return addresses
|
1b4e5f60eacd4843e1c9ba6a72c866c52b5bd8d9
| 3,641,483
|
def continuations(tree, *, syntax, expander, **kw):
"""[syntax, block] call/cc for Python.
This allows saving the control state and then jumping back later
(in principle, any time later). Some possible use cases:
- Tree traversal (possibly a cartesian product of multiple trees, with the
current position in each tracked automatically).
- McCarthy's amb operator.
- Generators. (Python already has those, so only for teaching.)
This is a very loose pythonification of Paul Graham's continuation-passing
macros, which implement continuations by chaining closures and passing the
continuation semi-implicitly. For details, see chapter 20 in On Lisp:
http://paulgraham.com/onlisp.html
Continuations are most readily implemented when the program is written in
continuation-passing style (CPS), but that is unreadable for humans.
The purpose of this macro is to partly automate the CPS transformation, so
that at the use site, we can write CPS code in a much more readable fashion.
A ``with continuations`` block implies TCO; the same rules apply as in a
``with tco`` block. Furthermore, ``with continuations`` introduces the
following additional rules:
- Functions which make use of continuations, or call other functions that do,
must be defined within a ``with continuations`` block, using the usual
``def`` or ``lambda`` forms.
- All function definitions in a ``with continuations`` block, including
any nested definitions, have an implicit formal parameter ``cc``,
**even if not explicitly declared** in the formal parameter list.
If declared explicitly, ``cc`` must be in a position that can accept a
default value.
This means ``cc`` must be declared either as by-name-only::
with continuations:
def myfunc(a, b, *, cc):
...
f = lambda *, cc: ...
or as the last parameter that has no default::
with continuations:
def myfunc(a, b, cc):
...
f = lambda cc: ...
Then the continuation machinery will automatically set the default value
of ``cc`` to the default continuation (``identity``), which just returns
its arguments.
The most common use case for explicitly declaring ``cc`` is that the
function is the target of a ``call_cc[]``; then it helps readability
to make the ``cc`` parameter explicit.
- A ``with continuations`` block will automatically transform all
function definitions and ``return`` statements lexically contained
within the block to use the continuation machinery.
- ``return somevalue`` actually means a tail-call to ``cc`` with the
given ``somevalue``.
Multiple values can be returned as a ``Values``. Multiple-valueness
is tested at run time.
Any ``Values`` return value is automatically unpacked to the args
and kwargs of ``cc``.
- An explicit ``return somefunc(arg0, ..., k0=v0, ...)`` actually means
a tail-call to ``somefunc``, with its ``cc`` automatically set to our
``cc``. Hence this inserts a call to ``somefunc`` before proceeding
with our current continuation. (This is most often what we want when
making a tail-call from a continuation-enabled function.)
Here ``somefunc`` **must** be a continuation-enabled function;
otherwise the TCO chain will break and the result is immediately
returned to the top-level caller.
(If the call succeeds at all; the ``cc`` argument is implicitly
filled in and passed by name. Regular functions usually do not
accept a named parameter ``cc``, let alone know what to do with it.)
- Just like in ``with tco``, a lambda body is analyzed as one big
return-value expression. This uses the exact same analyzer; for example,
``do[]`` (including any implicit ``do[]``) and the ``let[]`` expression
family are supported.
- Calls from functions defined in one ``with continuations`` block to those
defined in another are ok; there is no state or context associated with
the block.
- Much of the language works as usual.
Any non-tail calls can be made normally. Regular functions can be called
normally in any non-tail position.
Continuation-enabled functions behave as regular functions when
called normally; only tail calls implicitly set ``cc``. A normal call
uses ``identity`` as the default ``cc``.
- For technical reasons, the ``return`` statement is not allowed at the
top level of the ``with continuations:`` block. (Because a continuation
is essentially a function, ``return`` would behave differently based on
whether it is placed lexically before or after a ``call_cc[]``.)
If you absolutely need to terminate the function surrounding the
``with continuations:`` block from inside the block, use an exception
to escape; see ``call_ec``, ``catch``, ``throw``.
**Capturing the continuation**:
Inside a ``with continuations:`` block, the ``call_cc[]`` statement
captures a continuation. (It is actually a macro, for technical reasons.)
For various possible program topologies that continuations may introduce, see
the clarifying pictures under ``doc/`` in the source distribution.
Syntax::
x = call_cc[func(...)]
*xs = call_cc[func(...)]
x0, ... = call_cc[func(...)]
x0, ..., *xs = call_cc[func(...)]
call_cc[func(...)]
Conditional variant::
x = call_cc[f(...) if p else g(...)]
*xs = call_cc[f(...) if p else g(...)]
x0, ... = call_cc[f(...) if p else g(...)]
x0, ..., *xs = call_cc[f(...) if p else g(...)]
call_cc[f(...) if p else g(...)]
Assignment targets:
- To destructure positional multiple-values (from a `Values` return value),
use a tuple assignment target (comma-separated names, as usual).
Destructuring *named* return values from a `call_cc` is currently not supported.
- The last assignment target may be starred. It is transformed into
the vararg (a.k.a. ``*args``) of the continuation function.
(It will capture a whole tuple, or any excess items, as usual.)
- To ignore the return value (useful if ``func`` was called only to
perform its side-effects), just omit the assignment part.
Conditional variant:
- ``p`` is any expression. If truthy, ``f(...)`` is called, and if falsey,
``g(...)`` is called.
- Each of ``f(...)``, ``g(...)`` may be ``None``. A ``None`` skips the
function call, proceeding directly to the continuation. Upon skipping,
all assignment targets (if any are present) are set to ``None``.
The starred assignment target (if present) gets the empty tuple.
- The main use case of the conditional variant is for things like::
with continuations:
k = None
def setk(cc):
global k
k = cc
def dostuff(x):
call_cc[setk() if x > 10 else None] # capture only if x > 10
...
To keep things relatively straightforward, a ``call_cc[]`` is only
allowed to appear **at the top level** of:
- the ``with continuations:`` block itself
- a ``def`` or ``async def``
Nested defs are ok; here *top level* only means the top level of the
*currently innermost* ``def``.
If you need to place ``call_cc[]`` inside a loop, use ``@looped`` et al.
from ``unpythonic.fploop``; this has the loop body represented as the
top level of a ``def``.
Multiple ``call_cc[]`` statements in the same function body are allowed.
These essentially create nested closures.
**Main differences to Scheme and Racket**:
    Compared to Scheme/Racket, where ``call/cc`` will capture also expressions
    occurring further up in the call stack, our ``call_cc`` may need to be
    placed differently (further out, depending on what needs to be captured)
    due to the delimited nature of the continuations implemented here.
Scheme and Racket implicitly capture the continuation at every position,
whereas we do it explicitly, only at the use sites of the ``call_cc`` macro.
Also, since there are limitations to where a ``call_cc[]`` may appear, some
code may need to be structured differently to do some particular thing, if
porting code examples originally written in Scheme or Racket.
Unlike ``call/cc`` in Scheme/Racket, ``call_cc`` takes **a function call**
as its argument, not just a function reference. Also, there's no need for
it to be a one-argument function; any other args can be passed in the call.
The ``cc`` argument is filled implicitly and passed by name; any others are
passed exactly as written in the client code.
**Technical notes**:
The ``call_cc[]`` statement essentially splits its use site into *before*
and *after* parts, where the *after* part (the continuation) can be run
a second and further times, by later calling the callable that represents
the continuation. This makes a computation resumable from a desired point.
The return value of the continuation is whatever the original function
returns, for any ``return`` statement that appears lexically after the
``call_cc[]``.
The effect of ``call_cc[]`` is that the function call ``func(...)`` in
the brackets is performed, with its ``cc`` argument set to the lexically
remaining statements of the current ``def`` (at the top level, the rest
of the ``with continuations`` block), represented as a callable.
The continuation itself ends there (it is *delimited* in this particular
sense), but it will chain to the ``cc`` of the function it appears in.
This is termed the *parent continuation* (**pcc**), stored in the internal
variable ``_pcc`` (which defaults to ``None``).
Via the use of the pcc, here ``f`` will maintain the illusion of being
just one function, even though a ``call_cc`` appears there::
def f(*, cc):
...
call_cc[g(1, 2, 3)]
...
The continuation is a closure. For its pcc, it will use the value the
original function's ``cc`` had when the definition of the continuation
was executed (for that particular instance of the closure). Hence, calling
the original function again with its ``cc`` set to something else will
produce a new continuation instance that chains into that new ``cc``.
The continuation's own ``cc`` will be ``identity``, to allow its use just
like any other function (also as argument of a ``call_cc`` or target of a
tail call).
When the pcc is set (not ``None``), the effect is to run the pcc first,
and ``cc`` only after that. This preserves the whole captured tail of a
computation also in the presence of nested ``call_cc`` invocations (in the
above example, this would occur if also ``g`` used ``call_cc``).
Continuations are not accessible by name (their definitions are named by
gensym). To get a reference to a continuation instance, stash the value
of the ``cc`` argument somewhere while inside the ``call_cc``.
The function ``func`` called by a ``call_cc[func(...)]`` is (almost) the
only place where the ``cc`` argument is actually set. There it is the
captured continuation. Roughly everywhere else, ``cc`` is just ``identity``.
Tail calls are an exception to this rule; a tail call passes along the current
value of ``cc``, unless overridden manually (by setting the ``cc=...`` kwarg
in the tail call).
When the pcc is set (not ``None``) at the site of the tail call, the
machinery will create a composed continuation that runs the pcc first,
and ``cc`` (whether current or manually overridden) after that. This
composed continuation is then passed to the tail call as its ``cc``.
**Tips**:
- Once you have a captured continuation, one way to use it is to set
``cc=...`` manually in a tail call, as was mentioned. Example::
def main():
call_cc[myfunc()] # call myfunc, capturing the current cont...
... # ...which is the rest of "main"
def myfunc(cc):
ourcc = cc # save the captured continuation (sent by call_cc[])
def somefunc():
return dostuff(..., cc=ourcc) # and use it here
somestack.append(somefunc)
In this example, when ``somefunc`` is eventually called, it will tail-call
``dostuff`` and then proceed with the continuation ``myfunc`` had
at the time when that instance of the ``somefunc`` closure was created.
(This pattern is essentially how to build the ``amb`` operator.)
- Instead of setting ``cc``, you can also overwrite ``cc`` with a captured
continuation inside a function body. That overrides the continuation
for the rest of the dynamic extent of the function, not only for a
particular tail call::
def myfunc(cc):
ourcc = cc
def somefunc():
cc = ourcc
return dostuff(...)
somestack.append(somefunc)
- A captured continuation can also be called manually; it's just a callable.
The assignment targets, at the ``call_cc[]`` use site that spawned this
particular continuation, specify its call signature. All args are
positional, except the implicit ``cc``, which is by-name-only.
- Just like in Scheme/Racket's ``call/cc``, the values that get bound
to the ``call_cc[]`` assignment targets on second and further calls
(when the continuation runs) are the arguments given to the continuation
when it is called (whether implicitly or manually).
- Setting ``cc`` to ``unpythonic.fun.identity``, while inside a ``call_cc``,
will short-circuit the rest of the computation. In such a case, the
continuation will not be invoked automatically. A useful pattern for
suspend/resume.
- However, it is currently not possible to prevent the rest of the tail
of a captured continuation (the pcc) from running, apart from manually
setting ``_pcc`` to ``None`` before executing a ``return``. Note that
doing that is not strictly speaking supported (and may be subject to
change in a future version).
- When ``call_cc[]`` appears inside a function definition:
- It tail-calls ``func``, with its ``cc`` set to the captured
continuation.
- The return value of the function containing one or more ``call_cc[]``
statements is the return value of the continuation.
- When ``call_cc[]`` appears at the top level of ``with continuations``:
- A normal call to ``func`` is made, with its ``cc`` set to the captured
continuation.
- In this case, if the continuation is called later, it always
returns ``None``, because the use site of ``call_cc[]`` is not
inside a function definition.
- If you need to insert just a tail call (no further statements) before
proceeding with the current continuation, no need for ``call_cc[]``;
use ``return func(...)`` instead.
The purpose of ``call_cc[func(...)]`` is to capture the current
continuation (the remaining statements), and hand it to ``func``
as a first-class value.
- To combo with ``multilambda``, use this ordering::
with multilambda, continuations:
...
- Some very limited comboability with ``call_ec``. May be better to plan
ahead, using ``call_cc[]`` at the appropriate outer level, and then
short-circuit (when needed) by setting ``cc`` to ``identity``.
This avoids the need to have both ``call_cc`` and ``call_ec`` at the
same time.
- ``unpythonic.ec.call_ec`` can be used normally **lexically before any**
``call_cc[]``, but (in a given function) after at least one ``call_cc[]``
has run, the ``ec`` ceases to be valid. This is because our ``call_cc[]``
actually splits the function into *before* and *after* parts, and
**tail-calls** the *after* part.
(Wrapping the ``def`` in another ``def``, and placing the ``call_ec``
on the outer ``def``, does not help either, because even the outer
function has exited by the time *the continuation* is later called
the second and further times.)
Usage of ``call_ec`` while inside a ``with continuations`` block is::
with continuations:
@call_ec
def result(ec):
print("hi")
ec(42)
print("not reached")
assert result == 42
result = call_ec(lambda ec: do[print("hi"),
ec(42),
print("not reached")])
Note the signature of ``result``. Essentially, ``ec`` is a function
that raises an exception (to escape to a dynamically outer context),
whereas the implicit ``cc`` is the closure-based continuation handled
by the continuation machinery.
See the ``tco`` macro for details on the ``call_ec`` combo.
"""
if syntax != "block":
raise SyntaxError("continuations is a block macro only") # pragma: no cover
if syntax == "block" and kw['optional_vars'] is not None:
raise SyntaxError("continuations does not take an as-part") # pragma: no cover
# Two-pass macro.
with dyn.let(_macro_expander=expander):
return _continuations(block_body=tree)
|
333494e07462ee554701616c5069fa61c5f46841
| 3,641,484
|
def DNA_dynamic_pressure(y, r, h, yunits='kT', dunits='m', opunits='kg/cm^2'):
    """Estimate peak dynamic overpressure at range r from a burst of yield y using
    the Defense Nuclear Agency 1kT standard free airburst overpressure, assuming an ideal
    surface. Many real-world surfaces are not ideal (most, in the opinion of Soviet
    analysts), meaning that this function has only limited predictive capability."""
yld = convert_units(y, yunits, 'kT')
gr = convert_units(r, dunits, 'm')
height = convert_units(h, dunits, 'm')
dyn = _DNAairburstpeakdyn(gr, yld, height)
return convert_units(dyn, 'Pa', opunits)
|
ac56c9d72c516658384ac313c64ba7ed1235e0ea
| 3,641,486
|
import torch

def revcumsum(U):
"""
Reverse cumulative sum for faster performance.
"""
return U.flip(dims=[0]).cumsum(dim=0).flip(dims=[0])
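
# Illustrative example (assumes PyTorch): entry i holds the sum of U[i:].
import torch

u = torch.tensor([1., 2., 3.])
print(revcumsum(u))  # tensor([6., 5., 3.])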
|
da147820073f5be9d00b137e48a28d726516dcd0
| 3,641,487
|
def http_trace_parser_hook(request):
"""
Retrieves the propagation context out of the request. Uses the honeycomb header, with W3C header as fallback.
"""
honeycomb_header_value = honeycomb.http_trace_parser_hook(request)
w3c_header_value = w3c.http_trace_parser_hook(request)
if honeycomb_header_value:
return honeycomb_header_value
else:
return w3c_header_value
|
7c97ed82f22357d3867e8a504a30f3a857837bcf
| 3,641,488
|
import torch
def format_attn(attention_tuples: tuple):
"""
Input: N tuples (N = layer num)
Each tuple item is Tensor of shape
Batch x num heads x from x to
Output: Tensor of shape layer x from x to
(averaged over heads)
"""
    # Concatenate the per-layer (batch x heads x from x to) tensors along dim 0
    # (assumes batch size 1), then average over the heads dimension.
    return torch.cat(attention_tuples, dim=0).mean(dim=1)
|
8d25d081992099835a21cdbefb406f378350f983
| 3,641,489
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.nddata import Cutout2D
from astropy.modeling import models, fitting

def fit_gaussian2d(img, coords, boxsize, plot=False,
                   fwhm_min=1.7, fwhm_max=30, pos_delta_max=1.7):
    """
    Calculate the FWHM of an object located at the pixel
    coordinates in the image. The FWHM will be estimated
    from a cutout with the specified boxsize.
Parameters
----------
img : ndarray, 2D
The image where a star is located for calculating a FWHM.
coords : len=2 ndarray
The [x, y] pixel position of the star in the image.
boxsize : int
The size of the box (on the side), in pixels.
fwhm_min : float, optional
The minimum allowed FWHM for constraining the fit (pixels).
fwhm_max : float, optional
The maximum allowed FWHM for constraining the fit (pixels).
    pos_delta_max : float, optional
        The maximum allowed positional offset for constraining the fit (pixels).
        This ensures that the fitter doesn't wander off to a bad pixel.
    plot : bool, optional
        If True, display the cutout, the fitted model, and their difference.
"""
cutout_obj = Cutout2D(img, coords, boxsize, mode='strict')
cutout = cutout_obj.data
x1d = np.arange(0, cutout.shape[0])
y1d = np.arange(0, cutout.shape[1])
x2d, y2d = np.meshgrid(x1d, y1d)
# Setup our model with some initial guess
x_init = boxsize/2.0
y_init = boxsize/2.0
stddev_init = fwhm_to_stddev(fwhm_min)
g2d_init = models.Gaussian2D(x_mean = x_init,
y_mean = y_init,
x_stddev = stddev_init,
y_stddev = stddev_init,
amplitude=cutout.max())
g2d_init += models.Const2D(amplitude=0.0)
g2d_init.x_stddev_0.min = fwhm_to_stddev(fwhm_min)
g2d_init.y_stddev_0.min = fwhm_to_stddev(fwhm_min)
g2d_init.x_stddev_0.max = fwhm_to_stddev(fwhm_max)
g2d_init.y_stddev_0.max = fwhm_to_stddev(fwhm_max)
g2d_init.x_mean_0.min = x_init - pos_delta_max
g2d_init.x_mean_0.max = x_init + pos_delta_max
g2d_init.y_mean_0.min = y_init - pos_delta_max
g2d_init.y_mean_0.max = y_init + pos_delta_max
# print(g2d_init)
# pdb.set_trace()
fit_g = fitting.LevMarLSQFitter()
g2d = fit_g(g2d_init, x2d, y2d, cutout)
if plot:
mod_img = g2d(x2d, y2d)
plt.figure(1, figsize=(15,5))
plt.clf()
plt.subplots_adjust(left=0.05, wspace=0.3)
plt.subplot(1, 3, 1)
plt.imshow(cutout, vmin=mod_img.min(), vmax=mod_img.max())
plt.colorbar()
plt.title("Original")
plt.subplot(1, 3, 2)
plt.imshow(mod_img, vmin=mod_img.min(), vmax=mod_img.max())
plt.colorbar()
plt.title("Model")
plt.subplot(1, 3, 3)
plt.imshow(cutout - mod_img)
plt.colorbar()
plt.title("Orig - Mod")
# Adjust Gaussian parameters to the original coordinates.
cutout_pos = np.array([g2d.x_mean_0.value, g2d.y_mean_0.value])
origin_pos = cutout_obj.to_original_position(cutout_pos)
g2d.x_mean_0 = origin_pos[0]
g2d.y_mean_0 = origin_pos[1]
return g2d
|
c3e69f93fdf84c7f895f9cb01adf3e6a0aa3001d
| 3,641,490
|
import pandas as pd

def _ensure_aware(series, tz_local):
    """Convert naive datetimes to timezone-aware, or return them as-is.
    Args:
        series (pd.Series): Series of datetime values to check.
        tz_local (str, pytz.timezone, dateutil.tz.tzfile):
            Time zone to which naive timestamps will be converted.
            If the series already has timezone info, it is returned as-is.
    """
if pd.api.types.is_datetime64tz_dtype(series):
return series
return series.dt.tz_localize(tz=tz_local)
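
# Illustrative example (assumes pandas): a naive series gets localized, while
# a tz-aware series would be returned unchanged.
import pandas as pd

naive = pd.Series(pd.to_datetime(['2021-01-01 12:00']))
print(_ensure_aware(naive, 'UTC').dt.tz)  # UTC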
|
fbb99be365a47507ae676fc90601d13cfa46832b
| 3,641,491
|
def compute_epsilon(steps):
"""Computes epsilon value for given hyperparameters."""
if FLAGS.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = FLAGS.batch_size / NB_TRAIN
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=FLAGS.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to 1e-5 because Penn TreeBank has 60000 training points.
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
|
8c998fdcafaac3c99a87b4020ae6959e64170d36
| 3,641,493
|
import numpy as np

def extract_tumblr_posts(client, nb_requests, search_query, before, delta_limit):
"""Extract Tumblr posts with a given emotion.
Parameters:
client: Authenticated Tumblr client with the pytumblr package.
nb_requests: Number of API request.
search_query: Emotion to search for.
before: A timestamp to search for posts before that value.
delta_limit: Maximum difference of timestamp between two queries.
Returns:
posts: List of Tumblr posts.
"""
posts = []
for i in range(nb_requests):
tagged = client.tagged(search_query, filter='text', before=before)
for elt in tagged:
timestamp = elt['timestamp']
if (abs(timestamp - before) < delta_limit):
before = timestamp
current_post = []
current_post.append(elt['id'])
current_post.append(elt['post_url'])
elt_type = elt['type']
current_post.append(elt_type)
current_post.append(timestamp)
current_post.append(elt['date'])
current_post.append(elt['tags'])
current_post.append(elt['liked'])
current_post.append(elt['note_count'])
if (elt_type == 'photo'):
# Only take the first image
current_post.append(elt['photos'][0]['original_size']['url'])
current_post.append(elt['caption'].replace('\n',' ').replace('\r',' '))
current_post.append(search_query)
posts.append(current_post)
elif (elt_type == 'text'):
current_post.append(np.nan)
current_post.append(elt['body'].replace('\n',' ').replace('\r',' '))
current_post.append(search_query)
posts.append(current_post)
return posts
|
bff51bcdc945244a47a88d32139749dddf25f0cf
| 3,641,494
|
def total_curtailment_expression_rule(mod, g, tmp):
"""
**Expression Name**: GenVar_Total_Curtailment_MW
**Defined Over**: GEN_VAR_OPR_TMPS
Available energy that was not delivered
There's an adjustment for subhourly reserve provision:
1) if downward reserves are provided, they will be called upon
occasionally, so power provision will have to decrease and additional
curtailment will be incurred;
2) if upward reserves are provided (energy is being curtailed),
they will be called upon occasionally, so power provision will have to
increase and less curtailment will be incurred
    The subhourly adjustment here is a simple linear function of reserve provision.
Assume cap factors don't incorporate availability derates,
so don't multiply capacity by Availability_Derate here (will count
as curtailment).
"""
return (
mod.Capacity_MW[g, mod.period[tmp]] * mod.gen_var_cap_factor[g, tmp]
- mod.GenVar_Provide_Power_MW[g, tmp]
+ mod.GenVar_Subhourly_Curtailment_MW[g, tmp]
- mod.GenVar_Subhourly_Energy_Delivered_MW[g, tmp]
)
|
9a1466dbbbc945b30c1df04dc86a2134b3d0659a
| 3,641,495
|
def transpose(m):
"""Compute the inverse of `m`
Args:
m (Matrix3):
Returns:
Matrix3: the inverse
"""
return Matrix3(m[0], m[3], m[6],
m[1], m[4], m[7],
m[2], m[5], m[8])
|
843a4b9d52f7c15772957b7abe05ef8c32c8370b
| 3,641,496
|
def reverse_string(string):
"""Solution to exercise C-4.16.
Write a short recursive Python function that takes a character string s and
outputs its reverse. For example, the reverse of "pots&pans" would be
"snap&stop".
"""
n = len(string)
def recurse(idx):
if idx == 0:
return string[0] # Base case, decremented to beginning of string
return string[idx] + recurse(idx-1)
return recurse(n-1)
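
# The docstring's own example, run as a check:
assert reverse_string("pots&pans") == "snap&stop"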
|
6d4472fb9c042939020e8b819b4c9b705afd1e60
| 3,641,497
|
import pandas as pd

def result_to_df(model, data,
                 path: str = None,
                 prediction: str = 'prediction',
                 residual: str = 'residual') -> pd.DataFrame:
"""Create result data frame.
Args:
model (Union[NodeModel, StagewiseModel]): Model instance.
        data (MRData): Data object to predict.
prediction (str, optional):
Column name of the prediction. Defaults to 'prediction'.
residual (str, optional):
Column name of the residual. Defaults to 'residual'.
path (Union[str, None], optional):
Address that save the result, include the file name.
If ``None`` do not save the result, only return the result data
frame. Defaults to None.
Returns:
pd.DataFrame: Result data frame.
"""
data._sort_by_data_id()
pred = model.predict(data)
resi = data.obs - pred
df = data.to_df()
df[prediction] = pred
df[residual] = resi
if path is not None:
df.to_csv(path)
return df
|
8b089569d628a0f89381240a133b21ae926da7f9
| 3,641,498
|
import re
def auto(frmt, minV = None, maxV = None):
"""
Generating regular expressions for integer, real, date and time.
:param format: format similar to C printf function (description below)
:param min: optional minimum value
:param max: optional maximum value
:return: regular expression for a given format
Supported formats: see :py:class:`regexpgen.integer`, :py:class:`regexpgen.real`, :py:class:`regexpgen.date`, :py:class:`regexpgen.time`
Additional information:
Because single %d occurs as well in integer format and in date format, the integer function is preferred. To generate single %d for date please use regexpgen.date
Examples of use:
>>> import regexpgen
>>> regexpgen.auto("%Y-%m-%d", "2013-03-15", "2013-04-24")
'^(2013\\-03\\-(1[5-9]|2[0-9]|3[0-1])|2013\\-03\\-(0[1-9]|1[0-9]|2[0-9]|3[0-1])|2013\\-04\\-(0[1-9]|1[0-9]|2[0-9]|30)|2013\\-04\\-(0[1-9]|1[0-9]|2[0-4]))$'
>>> regexpgen.auto("%0d", -10, 10)
'^(-?([0-9]|10))$'
"""
if (frmt is None or not isinstance(frmt, str)):
raise ValueError("Bad input")
b = builder.RegexpBuilder()
    integerFormats = frmt in ["%d", "%0d"] or re.match(r"^%0[0-9]+d$", frmt)
    integerFormatsNotd = frmt in ["%0d"] or re.match(r"^%0[0-9]+d$", frmt)
    realFormats = frmt in ["%lf", "%0lf"] or re.match(r"^%\.[0-9]+lf$", frmt) or re.match(r"^%0\.[0-9]+lf$", frmt) or re.match(r"^%0[1-9][0-9]*\.[0-9]+lf$", frmt) or re.match(r"^%[1-9][0-9]*\.[0-9]+lf$", frmt)
timeFormats = str(frmt).find("%H") >= 0 or str(frmt).find("%I") >= 0 or str(frmt).find("%M") >= 0 or str(frmt).find("%p") >= 0 or str(frmt).find("%P") >= 0 or str(frmt).find("%S") >= 0
dateFormats = str(frmt).find("%d") >= 0 or str(frmt).find("%m") >= 0 or str(frmt).find("%Y") >= 0 or str(frmt).find("%y") >= 0
if integerFormats and realFormats:
raise ValueError("Bad input")
elif integerFormatsNotd and dateFormats:
raise ValueError("Bad input")
elif integerFormats and timeFormats:
raise ValueError("Bad input")
elif realFormats and dateFormats:
raise ValueError("Bad input")
elif realFormats and timeFormats:
raise ValueError("Bad input")
elif dateFormats and timeFormats:
raise ValueError("Bad input")
elif integerFormats:
return b.createIntegerRegex(frmt, minV, maxV)
elif realFormats:
return b.createRealRegex(frmt, minV, maxV)
elif dateFormats:
return b.createDateRegex(frmt, minV, maxV)
elif timeFormats:
return b.createTimeRegex(frmt, minV, maxV)
else:
raise ValueError("Bad input")
|
a160b2c49baf875adb0a7949b8ea0e0e92dc936a
| 3,641,499
|
import tensorflow as tf

def tf_box_3d_diagonal_length(boxes_3d):
    """Returns the diagonal length of box_3d
    Args:
        boxes_3d: A tensor of shape (N x 7) of boxes in box_3d format.
Returns:
Diagonal of all boxes, a tensor of (N,) shape.
"""
lengths_sqr = tf.square(boxes_3d[:, 3])
width_sqr = tf.square(boxes_3d[:, 4])
height_sqr = tf.square(boxes_3d[:, 5])
lwh_sqr_sums = lengths_sqr + width_sqr + height_sqr
diagonals = tf.sqrt(lwh_sqr_sums)
return diagonals
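
# Illustrative example (assumes TensorFlow 2.x eager execution): a unit cube
# has diagonal sqrt(3); only the l/w/h columns (indices 3..5) matter.
import tensorflow as tf

boxes = tf.constant([[0., 0., 0., 1., 1., 1., 0.]])
print(tf_box_3d_diagonal_length(boxes))  # tf.Tensor([1.7320508], shape=(1,), ...)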
|
acf1788f8e035a3adf96f3b303f6344bcee0a1f1
| 3,641,500
|
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

async def employment_plot(current_city: City):
"""
Visualize employment information for city
- see industry breakdown and employment type
### Query Parameters
- city
### Response
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Industry
industry_type = city_data.subset[city_data.industry()]
industry_melt = pd.melt(industry_type)
industry_melt.columns = ['industry', 'percentage']
# Employment Type
employment_type = city_data.subset[city_data.employment()]
type_melt = pd.melt(employment_type)
type_melt.columns = ['employment type', 'percentage']
#Create subplots
fig = make_subplots(rows=1, cols=2, subplot_titles = (f'Industry in {city}', f'Employment Types in {city}'))
    fig.add_trace(go.Bar(x=industry_melt['industry'], y=industry_melt['percentage'],
                         marker=dict(color=industry_melt['percentage'], coloraxis="coloraxis")),
                  row=1, col=1)
    fig.add_trace(go.Bar(x=type_melt['employment type'], y=type_melt['percentage'],
                         marker=dict(color=type_melt['percentage'], coloraxis="coloraxis")),
                  row=1, col=2)
fig.update_layout(
coloraxis=dict(colorscale = 'Bluered_r'),
coloraxis_showscale = False,
showlegend = False)
fig.show()
# fig.write_html("path/to/file.html")
return fig.to_json()
|
4db7f3f0973391c7294be486088a24c6ffa2770a
| 3,641,501
|
import numpy.linalg as npl

def get_freesurfer_matrix_ras2vox():
    """
    Get the standard affine matrix that converts RAS coordinates to voxel indices for FreeSurfer conformed space volumes.
    See the documentation for get_freesurfer_matrix_vox2ras for background information.
Returns
-------
2D numpy array
The affine transformation matrix, a float matrix with shape (4, 4).
"""
return npl.inv(get_freesurfer_matrix_vox2ras())
|
5d5ee8d7bec4f632e494f468f6ebc7ff20cdf85c
| 3,641,502
|
def parse_create_table(string):
"""Parse the create table sql query and return metadata
Args:
string(sql): SQL string from a SQL Statement
Returns:
table_data(dict): table_data dictionary for instantiating a table
"""
# Parse the base table definitions
table_data = to_dict(get_base_parser().parseString(string))
# Parse the columns and append to the list
table_data['columns'] = list()
table_data['constraints'] = list()
column_position = 0
for field in table_data['raw_fields']:
try:
column = to_dict(get_column_parser().parseString(field))
# Add position of the column
column['position'] = column_position
column_position += 1
# Change fk_reference_column to string from list
if FK_REFERENCE in column:
column[FK_REFERENCE] = column[FK_REFERENCE][0]
table_data['columns'].append(column)
except ParseException:
try:
constraint = to_dict(
get_constraints_parser().parseString(field))
table_data['constraints'].append(constraint)
except ParseException:
logger.error(field)
raise
return table_data
|
e82875dfcc3cd052aeecac8c38277c26f0d15e8f
| 3,641,503
|
def retrieve_context_connection_connection_by_id(uuid): # noqa: E501
"""Retrieve connection by ID
Retrieve operation of resource: connection # noqa: E501
    :param uuid: ID of the connection
:type uuid: str
:rtype: Connection
"""
return 'do some magic!'
|
4de55de3a799f7c41168fa9072b1a03345dd61de
| 3,641,504
|
def read_filenames(path):
"""
Read all file names from `path` and match them against FILENAME_REGEX.
Arguments:
- path: path to the directory containing CSV data files.
Returns:
- list of tuples of every filename and regex match to the CSV filename
format in the specified directory
"""
daily_filenames = [(f, FILENAME_REGEX.match(f))
for f in listdir(path) if isfile(join(path, f))]
return daily_filenames
|
970b00dc5947426960110fa646c9c1c91114ef9f
| 3,641,505
|
def _sp_sleep_for(t: int) -> str:
"""Return the subprocess cmd for sleeping for `t` seconds."""
return 'python -c "import time; time.sleep({})"'.format(t)
|
20ac8022a2438ceb62123f534ba5911b7c560502
| 3,641,506
|
import re
def verify_show_environment(dut, verify_str_list):
"""
    Verify that every pattern in verify_str_list appears in 'show environment' output.
Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com)
"""
command = "show environment"
output = utils.remove_last_line_from_string(st.show(dut, command, skip_tmpl=True))
result = True
for item in verify_str_list:
if not re.findall(item, output, re.IGNORECASE):
st.error("Item '{}' is NOT found".format(item))
result = False
return result
|
9334045f2b4ff2e33085398b871ff7a905b995ee
| 3,641,507
|
def get_labelset_keys():
"""get labelset keys
Given DATA_CFG, return slideviewer labelsets
Args:
none
Returns:
list: a list of labelset names
"""
cfg = ConfigSet()
label_config = cfg.get_value(path=const.DATA_CFG+'::LABEL_SETS')
labelsets = [cfg.get_value(path=const.DATA_CFG+'::USE_LABELSET')]
if cfg.get_value(path=const.DATA_CFG+'::USE_ALL_LABELSETS'):
labelsets = list(label_config.keys())
return labelsets
|
824d15b529bccb576c359fb50614ed1e33aa561c
| 3,641,508
|
from typing import List
def create_instrument_level_pattern(instrument_symbols: List[str]) -> str:
"""Creates a regular expression pattern to target all the instrument symbols in a list.
The function creates a regular expression pattern to target, within a specific DC
message, the portion of the message containing the complete instrument symbol, for
each instrument symbol included in the list passed as an input of the function.
Parameters
----------
instrument_symbols: List[str]
A list of the stable components of the futures instrument symbols.
Returns
-------
str
A regular expression pattern.
"""
specific_instrument_regexes = [
create_specific_instrument_regex(name)
for name in instrument_symbols
]
return rf"({'|'.join(specific_instrument_regexes)})"
|
25e1e9cc52b009e8e4fa95f8502e5b10cad29209
| 3,641,509
|
def localtime(nist_lookup=0,
localtime=DateTime.localtime,utctime=utctime):
""" Returns the current local time as DateTime instance.
Same notes as for utctime().
"""
return localtime(utctime(nist_lookup).gmticks())
|
312bb973edd62b03d2d251e4d8e215cd00bd470d
| 3,641,510
|
from datetime import datetime
def device_now():
"""Return datetime object constructed from 'now' on device."""
cmd = "adb shell date '+%Y:%m:%d:%H:%M:%S'"
lines = u.docmdlines(cmd)
line = lines.pop(0)
if line is None:
u.error("unable to interpret output from '%s'" % cmd)
d = line.split(":")
try:
dt = datetime(int(d[0]), int(d[1]), int(d[2]),
int(d[3]), int(d[4]), int(d[5]))
return dt
except ValueError:
u.error("unable to parse/interpret output "
"from cmd '%s' (value %s)" % (cmd, line))
|
93e927194390e77fcc7b26cb22db2e5d1debd164
| 3,641,511
|
def copy_safe_request(request):
"""
    Copy selected attributes from a request object into a new fake request object. This is needed in places where
    thread-safe pickling of the useful request data is required.
"""
meta = {
k: request.META[k]
for k in HTTP_REQUEST_META_SAFE_COPY
if k in request.META and isinstance(request.META[k], str)
}
return NautobotFakeRequest(
{
"META": meta,
"POST": request.POST,
"GET": request.GET,
"FILES": request.FILES,
"user": request.user,
"path": request.path,
"id": getattr(request, "id", None), # UUID assigned by middleware
}
)
|
a0e2b670732a2d09ac51678059bac80d115b350b
| 3,641,512
|
import hashlib
def sha256(firmware_filename, firmware_size=None):
"""Returns the sha256 hash of the firmware"""
hasher = hashlib.sha256()
# If firmware size is supplied, then we want a sha256 of the firmware with its header
if firmware_size is not None:
hasher.update(b"\x00" + firmware_size.to_bytes(4, "little"))
with open(firmware_filename, "rb", buffering=0) as file:
while True:
chunk = file.read(128)
if not chunk:
break
hasher.update(chunk)
return hasher.digest()
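
A hedged usage sketch; "firmware.bin" is a hypothetical file name:

import os

# hash the firmware together with the 5-byte size header, then print the digest
path = "firmware.bin"
digest = sha256(path, firmware_size=os.path.getsize(path))
print(digest.hex())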
|
62fabc35796b9fe21ca2489b317550f93f6774ca
| 3,641,513
|
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][entry.entry_id].stop()
return unload_ok
|
38460ec92c350cdfcae0094039e834c3344369d8
| 3,641,514
|
def is_serial_increased(old, new):
""" Return true if serial number was increased using RFC 1982 logic. """
old, new = (int(n) for n in [old, new])
    diff = (new - old) % 2**32
    # RFC 1982: 'new' is ahead of 'old' iff the serial difference mod 2**32
    # falls in the open interval (0, 2**31); the previous upper bound of
    # 2**31 - 1 was off by one.
    return 0 < diff < 2**31
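
A quick illustration of the wraparound behaviour (serials are 32-bit and compared modulo 2**32):

print(is_serial_increased(1, 2))             # True: plain increase
print(is_serial_increased(2, 1))             # False: decrease
print(is_serial_increased(2**32 - 1, 5))     # True: increase across the wraparound
print(is_serial_increased(5, 2**32 - 1))     # False: 'new' is behind 'old'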
|
44a33a1c7e8caebe3b74284002c7c4be6ac29b40
| 3,641,515
|
def svn_relpath_skip_ancestor(parent_relpath, child_relpath):
"""svn_relpath_skip_ancestor(char const * parent_relpath, char const * child_relpath) -> char const *"""
return _core.svn_relpath_skip_ancestor(parent_relpath, child_relpath)
|
23ca0f2e91f0c69e7b410983603ef98e9dea4c13
| 3,641,516
|
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
bn_rnn = BatchNormalization(name='bn_rnn')(simp_rnn)
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
    model.summary()
return model
|
2b0c4614e0e80888db89fcc8e43ef0a6614400cb
| 3,641,517
|
def _pad_statistic(arr, pad_width, stat_length, stat_op):
"""
    Pads the array with values calculated along the given axis; used in modes "maximum",
    "minimum" and "mean".
"""
ndim = arr.ndim
shape = arr.shape
if stat_length is None:
stat_length = _make_stat_length(shape)
else:
stat_length = _convert_pad_to_nd(stat_length, ndim)
stat_length = _limit_stat_length(stat_length, shape)
for i in range(ndim):
pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
tensor_with_pad = pad_before + (arr,) + pad_after
arr = concatenate(tensor_with_pad, axis=i)
return arr
|
4976615e4d41f48d5063ed9af0719801dbe1f9db
| 3,641,518
|
def register_do(mysql, json):
"""
helper function that registers data objects into MySQL DB
@param mysql: a mysql object for MySQL database
@param json: metadata that contains information for data source and device
"""
cnx = mysql.connect()
cursor = cnx.cursor()
dataSource = json["dataSource"]
device = json["device"]
deviceSummary = json["deviceSummary"]
dataSource_arr = [dataSource["name"], int(dataSource["srcID"])]
    device_arr = [
        int(device["ID"]),
        int(device["dataSize"]),
        device["location"],
        device["name"],
        int(device["srcID"]),
        device["type"]]
    deviceSummary_arr = [
        int(deviceSummary["ID"]),
        deviceSummary["accessDuration"],
        int(deviceSummary["deviceID"])]
    # MySQL drivers accept only %s placeholders, regardless of the parameter type
    cursor.execute("INSERT INTO dataSource (name, srcID) VALUES (%s, %s)", dataSource_arr)
    cursor.execute("INSERT INTO device (ID, dataSize, location, name, srcID, type) VALUES (%s, %s, %s, %s, %s, %s)", device_arr)
    cursor.execute("INSERT INTO deviceSummary (ID, accessDuration, deviceID) VALUES (%s, %s, %s)", deviceSummary_arr)
cnx.commit()
return "data object registration success"
|
bfb8aa83e91955691d1b3a15c73c5e36d5e3b6b8
| 3,641,519
|
import decimal
def split_amount(amount, splits, places=2):
"""Return list of ``splits`` amounts where sum of items equals ``amount``.
>>> from decimal import Decimal
    >>> split_amount(Decimal('12'), 1)
    [Decimal('12.00')]
>>> split_amount(Decimal('12'), 2)
[Decimal('6.00'), Decimal('6.00')]
Amounts have a max of ``places`` decimal places. Last amount in the list
may not be the same as others (will always be lower than or equal to
others).
    >>> split_amount(Decimal('100'), 3)
    [Decimal('33.34'), Decimal('33.34'), Decimal('33.32')]
    >>> split_amount(Decimal('100'), 3, 4)
    [Decimal('33.3334'), Decimal('33.3334'), Decimal('33.3332')]
>>> split_amount(Decimal('12'), 7) # Doctest: +ELLIPSIS
[Decimal('1.72'), ..., Decimal('1.72'), ..., Decimal('1.68')]
>>> split_amount(Decimal('12'), 17) # Doctest: +ELLIPSIS
[Decimal('0.71'), ..., Decimal('0.71'), Decimal('0.64')]
"""
one = decimal.Decimal(10) ** -places
amount = amount.quantize(one)
with decimal.localcontext() as decimal_context:
decimal_context.rounding = decimal.ROUND_UP
upper_split = (amount / splits).quantize(one)
splitted_amounts = [upper_split] * (splits - 1)
lower_split = amount - sum(splitted_amounts)
splitted_amounts.append(lower_split)
return splitted_amounts
|
8c8a17ed9bbcab194550ea78a9b414f51ca5610d
| 3,641,520
|
from datetime import timedelta
def shift_compare_date(df, date_field, smaller_eq_than_days=1, compare_with_next=False):
""" ATENTION: This Dataframe need to be sorted!!!
"""
if compare_with_next:
s = (
(df[date_field].shift(-1) - df[date_field]
) <= timedelta(days=smaller_eq_than_days)
) & (
(df[date_field].shift(-1) - df[date_field]) > timedelta(days=0)
)
else:
s = (
(df[date_field] - df[date_field].shift(1)
) <= timedelta(days=smaller_eq_than_days)
) & (
(df[date_field] - df[date_field].shift(1)) >= timedelta(days=0)
)
return s
|
56d4466f61cb6329ec1e365ad74f349d6043dd0a
| 3,641,521
|
def format_alleles(variant):
"""Gets a string representation of the variant's alleles.
Args:
variant: nucleus.genomics.v1.Variant.
Returns:
A string ref_bases/alt1,alt2 etc.
"""
return '{}/{}'.format(variant.reference_bases, ','.join(
variant.alternate_bases))
|
775fe3e112ff0b7e73780600e0621a8695fa5ad0
| 3,641,522
|
import numbers
def _validate_inputs(input_list, input_names, method_name):
"""
This method will validate the inputs of other methods.
input_list is a list of the inputs passed to a method.
    input_names is a list of the variable names associated with
input_list
method_name is the name of the method whose input is being validated.
_validate_inputs will verify that all of the inputs in input_list are:
1) of the same type
2) either numpy arrays or instances of numbers.Number (floats or ints)
3) if they are numpy arrays, they all have the same length
If any of these criteria are violated, a RuntimeError will be raised
returns True if the inputs are numpy arrays; False if not
"""
if isinstance(input_list[0], np.ndarray):
desired_type = np.ndarray
elif isinstance(input_list[0], numbers.Number):
desired_type = numbers.Number
else:
raise RuntimeError("The arg %s input to method %s " % (input_names[0], method_name) +
"should be either a number or a numpy array")
valid_type = True
bad_names = []
for ii, nn in zip(input_list, input_names):
if not isinstance(ii, desired_type):
valid_type = False
bad_names.append(nn)
if not valid_type:
msg = "The input arguments:\n"
for nn in bad_names:
msg += "%s,\n" % nn
msg += "passed to %s " % method_name
msg += "need to be either numbers or numpy arrays\n"
msg += "and the same type as the argument %s" % input_names[0]
msg += "\n\nTypes of arguments are:\n"
for name, arg in zip(input_names, input_list):
msg += '%s: %s\n' % (name, type(arg))
raise RuntimeError(msg)
if desired_type is np.ndarray:
same_length = True
for ii in input_list:
if len(ii) != len(input_list[0]):
same_length = False
if not same_length:
raise RuntimeError("The arrays input to %s " % method_name +
"all need to have the same length")
    return desired_type is np.ndarray
|
25a72bd99639b4aab23459635fce116e08299bdc
| 3,641,523
|
def server_base_url(environ):
"""
Using information in tiddlyweb.config, construct
the base URL of the server, sans the trailing /.
"""
return '%s%s' % (server_host_url(environ), _server_prefix(environ))
|
3919c9223039929530d6543c13e39b880c657d4f
| 3,641,524
|
def calc_ctrlg_ratio(rpl: sc2reader.resources.Replay,
pid: int) -> dict[str, float]:
"""Calculates the ratio between `ControlGroupEvents` and the union of
    the `CommandEvents`, `SelectionEvents` and `ControlGroupEvents` sets
to quantify the players' level of awareness and use of this tactical
feature.
*Args*
- rpl (sc2reader.resources.Replay)
The replay being analysed.
- pid (int)
In-game player ID of the player being considered in the
analysis.
*Returns*
- (dict[str, float])
"""
command_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.CommandEvent)
and e.pid == (pid - 1)}
select_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.SelectionEvent)
and e.pid == (pid - 1)}
ctrlg_secs = {e.second for e in rpl.events
if isinstance(e, sc2reader.events.game.ControlGroupEvent)
and e.pid == (pid - 1)}
total_counted_events = len(command_secs | select_secs | ctrlg_secs)
if not total_counted_events:
return {"ctrlg_ratio": 0}
return {"ctrlg_ratio": len(ctrlg_secs)/total_counted_events}
|
74d79128bba3584a4966e0bb8f2ce0e4dfdf402e
| 3,641,525
|
from tqdm import tqdm
def draw_normal_surface(pcd, scale, estimation_params=None):
"""Draw and return a mesh of arrows of normal vectors for each point
in the given cloud
Parameters
----------
pcd : o3d.geometry.PointCloud
Input point cloud
scale : float
Scale of the default arrow which is 1 meter length
estimation_params : dict, optional
        Normal estimation parameters if input does not contain normals, by default None
Returns
-------
o3d.geometry.TriangleMesh
Collection of normal arrows as a single triangle mesh
"""
    if len(pcd.normals) != len(pcd.points):
        if estimation_params is None:
            # no parameters supplied; fall back to Open3D's default search parameters
            pcd.estimate_normals()
        else:
            pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(**estimation_params))
arrow_params = get_default_arrow(scale)
normal_surface = None
pairs = zip(np.asarray(pcd.points), np.asarray(pcd.normals))
for point, normal in tqdm(pairs, total=len(pcd.points), ncols=100):
arrow = draw_arrow(point, normal, (0, 1, 0), arrow_params)
if normal_surface is None:
normal_surface = arrow
else:
normal_surface += arrow
return normal_surface
|
cb54f2a84febe82b03806b09af0a8c99fecc0669
| 3,641,528
|
def texture_from_clusters(clusters):
""" Compute the GLCM texture properties from image clusters.
:param clusters: clusters of pixels representing sections of the image
:returns: DataFrame -- of texture features for every cluster.
"""
thetas = np.arange(0, np.pi, np.pi/8)
props = ['contrast', 'dissimilarity', 'homogeneity', 'energy']
tex_features = []
for i, cluster in enumerate(clusters):
prop_suffix = '_cluster_%d' % (i+1)
col_names = [name + prop_suffix for name in props]
features = glcm_features(cluster, [1], thetas, props)
# compute mean across all orientations
features = np.mean(features, axis=2)
df = pd.DataFrame(features.T, columns=col_names)
tex_features.append(df)
return pd.concat(tex_features, axis=1)
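
glcm_features is not shown here; a plausible sketch (an assumption, not necessarily the project's actual helper) built on scikit-image, where each property yields one value per (distance, angle) pair:

import numpy as np
from skimage.feature import graycomatrix, graycoprops  # 'greycomatrix' in older scikit-image

def glcm_features(image, distances, angles, props):
    # hypothetical helper: returns an array of shape (len(props), len(distances), len(angles))
    glcm = graycomatrix(image, distances, angles, levels=256, symmetric=True, normed=True)
    return np.array([graycoprops(glcm, prop) for prop in props])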
|
353aa3bbc1fec765fd01e201bd769e00bbf8a1fa
| 3,641,529
|
def parse_dict(input_data):
"""Return a rules dict of the format:
{
'light red': [(1, 'bright white'), (2, 'muted yellow')],
        'dark orange': [(3, 'bright white'), (4, 'muted yellow')],
'faded blue': [(0, 'bags')]
}
"""
bags = dict()
for line in input_data.split('\n'):
outer, inner = line.strip().split(' bags contain ')
inner = [i.split(' ') for i in inner.split(", ")]
if 'no' in inner[0]:
bags[outer] = [(0, 'bags')]
else:
bags[outer] = [(int(i[0]), ' '.join(i[1:3])) for i in inner]
return bags
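
A small usage sketch with input in the format the parser expects:

sample = ("light red bags contain 1 bright white bag, 2 muted yellow bags.\n"
          "faded blue bags contain no other bags.")
print(parse_dict(sample))
# {'light red': [(1, 'bright white'), (2, 'muted yellow')], 'faded blue': [(0, 'bags')]}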
|
a1aad66a16e4754c35c9b3518d5641096e393530
| 3,641,530
|
def distance_without_normalise(bin_image):
"""
Takes a binary image and returns a distance transform version of it.
"""
res = np.zeros_like(bin_image)
for j in range(1, bin_image.max() + 1):
one_cell = np.zeros_like(bin_image)
one_cell[bin_image == j] = 1
one_cell = distance_transform_cdt(one_cell)
res[bin_image == j] = one_cell[bin_image == j]
res = res.astype('uint8')
return res
|
ed4cf85498a74e2f7d030daefceebf66e460e0fd
| 3,641,532
|
def list_inventory():
""" Returns all of the Inventory """
app.logger.info('Request for inventory list')
inventory = []
category = request.args.get('category')
name = request.args.get('name')
condition = request.args.get('condition')
count = request.args.get('count')
available = request.args.get('available')
if category:
inventory = Inventory.find_by_category(category)
elif name:
inventory = Inventory.find_by_name(name)
else:
inventory = Inventory.all()
    results = [item.serialize() for item in inventory]
return make_response(jsonify(results), status.HTTP_200_OK)
|
381de71a10d1626f44710643cd837523e9a930ed
| 3,641,533
|
def is_instance_method(obj):
"""Checks if an object is a bound method on an instance."""
if not isinstance(obj, MethodType):
return False # Not a method
elif obj.__self__ is None:
return False # Method is not bound
    elif isinstance(obj.__self__, type):
        # __self__ is a class, so this is a classmethod rather than an instance
        # method (the original compound condition was always true for any bound method)
        return False
return True
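
A short check of the three cases the function distinguishes:

class Foo:
    def bar(self):
        pass

    @classmethod
    def baz(cls):
        pass

print(is_instance_method(Foo().bar))  # True: bound to an instance
print(is_instance_method(Foo.baz))    # False: bound to the class (classmethod)
print(is_instance_method(Foo.bar))    # False: a plain function, not a MethodType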
|
82050391193388cdb6d9466442774e6b0fa6878c
| 3,641,535
|
from typing import List
from typing import Dict
def _clean_empty_and_duplicate_authors_from_grobid_parse(authors: List[Dict]) -> List[Dict]:
"""
Within affiliation, `location` is a dict with fields <settlement>, <region>, <country>, <postCode>, etc.
Too much hassle, so just take the first one that's not empty.
"""
# stripping empties
clean_authors_list = []
for author in authors:
clean_first = author['first'].strip()
clean_last = author['last'].strip()
clean_middle = [m.strip() for m in author['middle']]
clean_suffix = author['suffix'].strip()
if clean_first or clean_last or clean_middle:
author['first'] = clean_first
author['last'] = clean_last
author['middle'] = clean_middle
author['suffix'] = clean_suffix
clean_authors_list.append(author)
# combining duplicates (preserve first occurrence of author name as position)
key_to_author_blobs = {}
ordered_keys_by_author_pos = []
for author in clean_authors_list:
key = (author['first'], author['last'], ' '.join(author['middle']), author['suffix'])
if key not in key_to_author_blobs:
key_to_author_blobs[key] = author
ordered_keys_by_author_pos.append(key)
else:
if author['email']:
key_to_author_blobs[key]['email'] = author['email']
if author['affiliation'] and (author['affiliation']['institution'] or author['affiliation']['laboratory'] or author['affiliation']['location']):
key_to_author_blobs[key]['affiliation'] = author['affiliation']
dedup_authors_list = [key_to_author_blobs[key] for key in ordered_keys_by_author_pos]
return dedup_authors_list
|
5a02b877ee074270c544c7dbb06dd1ceab487e79
| 3,641,536
|
def get_distutils_display_options():
""" Returns a set of all the distutils display options in their long and
short forms. These are the setup.py arguments such as --name or --version
which print the project's metadata and then exit.
Returns
-------
opts : set
The long and short form display option arguments, including the - or --
"""
short_display_opts = set('-' + o[1] for o in Distribution.display_options
if o[1])
long_display_opts = set('--' + o[0] for o in Distribution.display_options)
# Include -h and --help which are not explicitly listed in
# Distribution.display_options (as they are handled by optparse)
short_display_opts.add('-h')
long_display_opts.add('--help')
# This isn't the greatest approach to hardcode these commands.
# However, there doesn't seem to be a good way to determine
# whether build *will be* run as part of the command at this
# phase.
display_commands = set([
'clean', 'register', 'setopt', 'saveopts', 'egg_info',
'alias'])
return short_display_opts.union(long_display_opts.union(display_commands))
|
86e87f22ea97db4a2642ef578999ad1f0cd67a66
| 3,641,537
|
def get_followers(api, user_id):
"""Returns list of followers"""
followers = []
next_max_id = ''
while next_max_id is not None:
_ = api.getUserFollowers(user_id, maxid=next_max_id)
followers.extend(api.LastJson.get('users', []))
next_max_id = api.LastJson.get('next_max_id', '')
return followers
|
debfb11fe0b8b22232b82e9a8ea360a4d2a8cdc1
| 3,641,538
|
def map(v, ds, de, ts, te):
"""\
Map the value v, in range [ds, de] to
the corresponding value in range [ts, te]
"""
d1 = de - ds
d2 = te - ts
v2 = v - ds
r = v2 / d1
return ts + d2 * r
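
For example, remapping 5 from [0, 10] into [0, 100] (note that this function shadows the built-in map()):

print(map(5, 0, 10, 0, 100))   # 50.0
print(map(0.5, 0, 1, -1, 1))   # 0.0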
|
2c2ba49b2acc283ca25b07c10b7ad717ad6a280d
| 3,641,539
|
def get_Q_body(hs_type, Theta_SW_hs):
"""温水暖房用熱源機の筐体放熱損失 (2)
Args:
hs_type(str): 温水暖房用熱源機の種類
Theta_SW_hs(ndarray): 温水暖房用熱源機の往き温水温度
Returns:
ndarray: 温水暖房用熱源機の筐体放熱損失
"""
if hs_type in ['石油従来型暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '不明']:
# (2a)
return [234 * 3600 * 10 ** (-6)] * 24 * 365
elif hs_type in ['石油潜熱回収型暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
# (2b)
return (5.3928 * Theta_SW_hs - 71.903) * 3600 * 10 ** (-6)
else:
raise ValueError(hs_type)
|
60e35a31d9c9b2f5d77d3d6f1518b7a20484fad2
| 3,641,540
|
def softmax(inputs):
"""
Calculate the softmax for the give inputs (array)
:param inputs:
:return:
"""
return np.exp(inputs) / float(sum(np.exp(inputs)))
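
A quick check that the outputs form a probability distribution:

probs = softmax([1.0, 2.0, 3.0])
print(probs)        # approximately [0.090, 0.245, 0.665]
print(probs.sum())  # 1.0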
|
eb8e215e24fbc30e08e986d9b9498973a866cb9b
| 3,641,541
|
def _get_turn_angle(start_angle, target_angle):
"""
Difference in angle in the range -180 to +180 (where negative is counter clockwise)
Parameters
----------
start_angle, target_angle : float
Returns
-------
float
difference in angle.
"""
return _map_to_pm180(target_angle - start_angle)
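
_map_to_pm180 is not shown here; a plausible sketch (an assumption, not the source implementation) wraps an angle into [-180, 180):

def _map_to_pm180(angle):
    # hypothetical helper: wrap any angle into the half-open interval [-180, 180)
    return (angle + 180.0) % 360.0 - 180.0

print(_get_turn_angle(350.0, 10.0))  # 20.0: short clockwise turn across 0
print(_get_turn_angle(10.0, 350.0))  # -20.0: counter clockwise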
|
7f41482ec69c4d3c4c4b3e1afb674ad46e7d607b
| 3,641,543
|
import ctypes
def load(fname):
"""Load symbol from a JSON file.
You can also use pickle to do the job if you only work on python.
The advantage of load/save is the file is language agnostic.
This means the file saved using save can be loaded by other language binding of mxnet.
    You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS)
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
        raise TypeError('fname needs to be a string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return Symbol(handle)
|
bbeb4f5eb63a5ad656814d0ded27d7edbd9936d8
| 3,641,544
|
def filter_pairs(pairs):
"""returns pairs of with filter_pair()==True"""
return [pair for pair in pairs if filter_pair(pair)]
|
ce65a6ec84ea8b637771d75a5334af7d90bafa15
| 3,641,545
|
def merge(list_geo, npts=5):
"""
merge a list of cad_geometries and update internal/external faces and connectivities
Args:
        list_geo: a list of cad_geometries
        npts: number of points used for the merge, passed through to cad_geometry.merge (default 5)
    Returns:
        a merged cad_geometry
"""
geo_f = list_geo[0]
for geo in list_geo[1:]:
geo_f = geo_f.merge(geo, npts=npts)
return geo_f
|
70db1b52be8ae70d21f689c8f12e051d9c41cd64
| 3,641,546
|
from typing import Union
from typing import Sequence
from typing import Dict
import warnings
from pathlib import Path
from tqdm import tqdm
import logging
def prepare_commonvoice(
corpus_dir: Pathlike,
output_dir: Pathlike,
languages: Union[str, Sequence[str]] = "auto",
splits: Union[str, Sequence[str]] = COMMONVOICE_DEFAULT_SPLITS,
num_jobs: int = 1,
) -> Dict[str, Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions.
When all the manifests are available in the ``output_dir``, it will simply read and return them.
This function expects the input directory structure of::
>>> metadata_path = corpus_dir / language_code / "{train,dev,test}.tsv"
>>> # e.g. pl_train_metadata_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/train.tsv"
>>> audio_path = corpus_dir / language_code / "clips"
>>> # e.g. pl_audio_path = "/path/to/cv-corpus-7.0-2021-07-21/pl/clips"
Returns a dict with 3-level structure (lang -> split -> manifest-type)::
>>> {'en/fr/pl/...': {'train/dev/test': {'recordings/supervisions': manifest}}}
:param corpus_dir: Pathlike, the path to the downloaded corpus.
:param output_dir: Pathlike, the path where to write the manifests.
:param languages: 'auto' (prepare all discovered data) or a list of language codes.
:param splits: by default ``['train', 'dev', 'test']``, can also include
``'validated'``, ``'invalidated'``, and ``'other'``.
:param num_jobs: How many concurrent workers to use for scanning of the audio files.
    :return: a dict with manifests for all specified languages and their train/dev/test splits.
"""
if not is_module_available("pandas"):
raise ValueError(
"To prepare CommonVoice data, please 'pip install pandas' first."
)
if num_jobs > 1:
warnings.warn(
"num_jobs>1 currently not supported for CommonVoice data prep;"
"setting to 1."
)
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
assert output_dir is not None, (
"CommonVoice recipe requires to specify the output "
"manifest directory (output_dir cannot be None)."
)
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
if languages == "auto":
languages = set(COMMONVOICE_LANGS).intersection(
path.name for path in corpus_dir.glob("*")
)
if not languages:
raise ValueError(
f"Could not find any of CommonVoice languages in: {corpus_dir}"
)
elif isinstance(languages, str):
languages = [languages]
manifests = {}
for lang in tqdm(languages, desc="Processing CommonVoice languages"):
logging.info(f"Language: {lang}")
lang_path = corpus_dir / lang
# Maybe the manifests already exist: we can read them and save a bit of preparation time.
# Pattern: "cv_recordings_en_train.jsonl.gz" / "cv_supervisions_en_train.jsonl.gz"
lang_manifests = read_cv_manifests_if_cached(
output_dir=output_dir, language=lang
)
for part in splits:
logging.info(f"Split: {part}")
if part in lang_manifests:
logging.info(
f"CommonVoice language: {lang} already prepared - skipping."
)
continue
recording_set, supervision_set = prepare_single_commonvoice_tsv(
lang=lang,
part=part,
output_dir=output_dir,
lang_path=lang_path,
)
lang_manifests[part] = {
"supervisions": supervision_set,
"recordings": recording_set,
}
manifests[lang] = lang_manifests
return manifests
|
1f2be866e9003224588a6e2cd4a29500854e9fb9
| 3,641,548
|
def plot_array_trans(pdata,a,copy=False):
"""
Warning!!!
----------
    Latest information: as of 22/05/2012 this is deprecated and plot_array_transg is used instead.
Purpose:
--------
    Transform the array according to the specification in list a. Return a copy if copy is True.
Example:
--------
>>> b=np.arange(-9,9.1,0.5)
>>> pdata=np.ones((37,37))
>>> for i in range(37):
pdata[i]=b
>>> a=[(-9, -4), (-1, -0.5, 0, 0.5, 1), (4, 9)]
>>> plot_array_trans(pdata,a)
(array([[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
...,
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ],
[-2. , -1.95, -1.9 , ..., 1.9 , 1.95, 2. ]]),
[-2.0, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2.0],
[-9, -4, -1, -0.5, 0, 0.5, 1, 4, 9])
"""
if copy:
pdata_trans=pcopy.deepcopy(pdata)
else:
pdata_trans=pdata
low_range=a[0]
mid_range=a[1]
high_range=a[2]
if len(mid_range)==1:
raise ValueError('there is only 1 element in middle range!')
else:
interval=mid_range[1]-mid_range[0]
#
if isinstance(low_range,tuple):
low_range_plot=pcopy.deepcopy(list(low_range))
else:
low_range_plot=pcopy.deepcopy(list([low_range]))
for i in range(len(low_range_plot)):
low_range_plot[i]=mid_range[0]-interval*(len(low_range_plot)-i)
if isinstance(high_range,tuple):
high_range_plot=pcopy.deepcopy(list(high_range))
else:
high_range_plot=pcopy.deepcopy(list([high_range]))
for i in range(len(high_range_plot)):
high_range_plot[i]=mid_range[-1]+interval*(i+1)
if len(low_range_plot)==1:
pdata_trans=arraylintrans(pdata_trans,(low_range,mid_range[0]),(low_range_plot[0],mid_range[0]))
else:
for i in range(len(low_range_plot))[::-1]:
if i != len(low_range_plot)-1:
pdata_trans=arraylintrans(pdata_trans,(low_range[i],low_range[i+1]),(low_range_plot[i],low_range_plot[i+1]))
else:
pdata_trans=arraylintrans(pdata_trans,(low_range[i],mid_range[0]),(low_range_plot[i],mid_range[0]))
if len(high_range_plot)==1:
pdata_trans=arraylintrans(pdata_trans,(mid_range[-1],high_range),(mid_range[-1],high_range_plot[0]))
else:
for i in range(len(high_range_plot)):
if i ==0:
pdata_trans=arraylintrans(pdata_trans,(mid_range[-1],high_range[0]),(mid_range[-1],high_range_plot[0]))
else:
pdata_trans=arraylintrans(pdata_trans,(high_range[i-1],high_range[i]),(high_range_plot[i-1],high_range_plot[i]))
if not hasattr(low_range,'__iter__'):
low_range=list([low_range])
if not hasattr(high_range,'__iter__'):
high_range=list([high_range])
levtemp=[low_range_plot,mid_range,high_range_plot]
levels=[j for i in levtemp for j in i]
labtemp=[low_range,mid_range,high_range]
lab=[j for i in labtemp for j in i]
return pdata_trans,levels,lab
|
0b885fba59fa34f567df5f6891ecdbe46d8a8be9
| 3,641,549
|
def process_generate_api_token_data(post_data):
"""
This expects the post_data to contain an array called ``user_to_form``.
Each item in this array is of the form:
.. code-block:: python
'<UserID>.<form_prefix>' (i.e. '1.form-0')
    Each form may then contribute additional expiration-date key-value pairs:
.. code-block:: python
'<form_prefix>-expiration_date': '<date>' (i.e. 'form-0-expiration_date': '2021-06-04')
"""
user_to_form_pairs = [pair.split('.') for pair in post_data.getlist('user_to_form')]
user_form_data = []
for user_id, form_prefix in user_to_form_pairs:
user = User.objects.get(UserID=user_id)
form_data = dict_filter_keys_start_with(form_prefix, post_data)
date_str = '-'.join([form_data.get('ExpirationDate_year', ''),
form_data.get('ExpirationDate_month', ''),
form_data.get('ExpirationDate_day', '')])
expiration_date = set_date_from_str(date_str=date_str)
user_form_data.append({'user': user,
'expires': expiration_date})
return user_form_data
|
8da8c2566621bdc8710091daf604a292a30c602a
| 3,641,550
|
import bpy
from bpy import context as C
from bpy import data as D
def add_vcolor(hemis, mesh=None, name='color'):
"""Seems like `hemis` is color you wish to apply to currently selected mesh."""
if mesh is None:
mesh = C.scene.objects.active.data
elif isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
color = hemis
if len(hemis) == 2:
color = hemis[0]
if len(mesh.vertices) == len(hemis[1]):
color = hemis[1]
vcolor = mesh.vertex_colors.new(name)
if hasattr(mesh, "loops"):
loopidx = [0]*len(mesh.loops)
mesh.loops.foreach_get('vertex_index', loopidx)
if not isinstance(color[0], (list, tuple)):
for i, j in enumerate(loopidx):
vcolor.data[i].color = [color[j]]*3
else:
for i, j in enumerate(loopidx):
vcolor.data[i].color = color[j]
else:
# older blender version, need to iterate faces instead
print("older blender found...")
if not isinstance(color[0], (list, tuple)):
for i in range(len(mesh.faces)):
v = mesh.faces[i].vertices
vcolor.data[i].color1 = [color[v[0]]] * 3
vcolor.data[i].color2 = [color[v[1]]] * 3
vcolor.data[i].color3 = [color[v[2]]] * 3
else:
            for i in range(len(mesh.faces)):
v = mesh.faces[i].vertices
vcolor.data[i].color1 = color[v[0]]
vcolor.data[i].color2 = color[v[1]]
vcolor.data[i].color3 = color[v[2]]
print("Successfully added vcolor '%s'"%name)
return vcolor
|
9199411ab0265c8e16e4a8bb2dfa45f9550d5d1a
| 3,641,551
|
import itertools
import pandas as pd
def gridSeach(model, parameters, features, response, train, test):
"""
This function performs a grid search over the parameter space.
    It is simplistic and only allows a certain range of values. If there
is a parameter in the models that needs to be a list it has to be modified.
"""
names = sorted(parameters)
combinations = list(itertools.product(*(parameters[name] for name in names)))
names.append('r2')
model_matrix = pd.DataFrame(columns=names)
for c in combinations:
dictionary = dict(zip(names, c))
model = model.set_params(**dictionary)
model.fit(features[train], response[train])
if 'hidden_layer_sizes' in dictionary:
dictionary.update({'hidden_layer_sizes':[dictionary['hidden_layer_sizes']],
'r2':model.score(features[test], response[test])})
else:
dictionary.update({'r2':model.score(features[test], response[test])})
        model_matrix = pd.concat([model_matrix, pd.DataFrame([dictionary])], ignore_index=True)
    # DataFrame.append and .ix were removed in recent pandas; use concat/iloc instead
    dictionary = dict(model_matrix.iloc[model_matrix['r2'].values.argmax(), :-1])
if 'hidden_layer_sizes' in dictionary:
dictionary.update({'hidden_layer_sizes':dictionary['hidden_layer_sizes'][0]})
if 'n_neighbors' in dictionary:
dictionary.update({'n_neighbors':int(dictionary['n_neighbors'])})
model = model.set_params(**dictionary)
model.fit(features[train], response[train])
return (model, model_matrix)
|
69d406cb16312d2777e7aa0562f77c77b20c44f7
| 3,641,552
|
def cvt_lambdef(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""lambdef: 'lambda' [varargslist] ':' test"""
assert ctx.is_REF, [node]
name = xcast(ast_cooked.NameBindsNode, cvt(node.children[0], ctx.to_BINDING()))
ctx_func = new_ctx_from(ctx)
if len(node.children) == 4:
parameters = xcast(ast_cooked.BareTypedArgsListNode, cvt(node.children[1], ctx_func))
suite = cvt(node.children[3], ctx_func)
else:
parameters = ast_cooked.BareTypedArgsListNode(args=[])
suite = cvt(node.children[2], ctx_func)
return ast_cooked.FuncDefStmt(name=name,
parameters=parameters.args,
return_type=ast_cooked.OMITTED_NODE,
suite=suite,
scope_bindings=ctx_func.scope_bindings)
|
42ee22a02c2d003afc808bc3e28f18a57e3153fe
| 3,641,553
|
import re
def ischapter_name(text_str):
"""判断是否是章节名"""
if re.match(r'^第(.{1,9})([章节回卷集部篇])(\s*)(.*)', text_str):
return True
else:
return False
|
c89a34408def2c2f9026045925212c2dde88a41d
| 3,641,554
|
def calc_mean_onbit_density(bitsets, number_of_bits):
"""Calculate the mean density of bits that are on in bitsets collection.
Args:
bitsets (list[pyroaring.BitMap]): List of fingerprints
number_of_bits: Number of bits for all fingerprints
Returns:
float: Mean on bit density
"""
all_nr_onbits = [len(v) for v in bitsets]
mean_onbit = fsum(all_nr_onbits) / float(len(all_nr_onbits))
density = mean_onbit / number_of_bits
return float(density)
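
A small sketch using pyroaring bitmaps (pip install pyroaring):

from pyroaring import BitMap

fingerprints = [BitMap([1, 5, 9]), BitMap([2, 3])]
# (3 + 2) / 2 = 2.5 bits on average over 16 bits -> 0.15625
print(calc_mean_onbit_density(fingerprints, 16))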
|
4d68ff5c280708d930d8e1525753804f831fc9da
| 3,641,555
|
from typing import Any, Dict, List, Union
import boto3
def get_parameters(path: str) -> List[Dict[str, Any]]:
"""
Retrieve parameters from AWS SSM Parameter Store. Decrypts any encrypted parameters.
Relies on the appropriate environment variables to authenticate against AWS:
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
"""
ssm = boto3.client("ssm")
    next_token: Union[bool, str, None] = True
parameters: List[Dict[str, Any]] = []
while next_token is not None:
kwargs = {"Path": path, "Recursive": False, "WithDecryption": True}
if next_token is not True:
kwargs["NextToken"] = next_token
response = ssm.get_parameters_by_path(**kwargs)
new_parameters = response.get("Parameters", [])
parameters.extend(new_parameters)
next_token = response.get("NextToken")
return parameters
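
Hypothetical usage; the parameter path is made up and AWS credentials must be configured in the environment:

for param in get_parameters("/my-app/prod"):
    print(param["Name"], "=", param["Value"])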
|
0905e9e707dfa45b9dab8137676fac14e496e594
| 3,641,557
|