content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
|---|---|---|
def test_db_transaction_n1(monkeypatch):
"""Raise _DB_TRANSACTION_ATTEMPTS OperationalErrors to force a reconnection.
A cursor for each SQL statement should be returned in the order
the statements were submitted.
0. The first statement's execution raises OperationalError _DB_TRANSACTION_ATTEMPTS times, producing no results
1. A reconnection will occur
2. The first statement will be re-executed
3. The second statement will be executed
4. The third statement will be executed
Should get 3 cursors with the values _DB_TRANSACTION_ATTEMPTS, _DB_TRANSACTION_ATTEMPTS+1, & _DB_TRANSACTION_ATTEMPTS+2
The next mock_connection_ref should be 2
"""
db_disconnect_all()
mock_connection_ref = sequential_reference()
mock_cursor_ref = sequential_reference()
class mock_cursor():
def __init__(self) -> None: self.value = next(mock_cursor_ref)
def execute(self, sql_str):
if self.value < _DB_TRANSACTION_ATTEMPTS:
raise OperationalError
def fetchone(self): return self.value
class mock_connection():
def __init__(self) -> None: self.value = next(mock_connection_ref)
def cursor(self): return mock_cursor()
def close(self): self.value = None
def mock_connect(*args, **kwargs): return mock_connection()
monkeypatch.setattr(database, 'connect', mock_connect)
dbcur_list = db_transaction(_MOCK_DBNAME, _MOCK_CONFIG, ("SQL0", "SQL1", "SQL2"))
assert len(dbcur_list) == 3
assert dbcur_list[0].fetchone() == _DB_TRANSACTION_ATTEMPTS
assert dbcur_list[1].fetchone() == _DB_TRANSACTION_ATTEMPTS + 1
assert dbcur_list[2].fetchone() == _DB_TRANSACTION_ATTEMPTS + 2
assert next(mock_connection_ref) == 2
|
4dcb32f14d8a938765f4fde5375b6b686a6a5f5c
| 3,639,254
|
import requests
from datetime import datetime
def fetch_status():
"""
Fetch Clan Battle information from the analysis site <https://redive.estertion.win>
return
----
```
{
"cb_start": datetime,
"cb_end": datetime,
"cb_days": int
}
```
"""
# Fetch Clan Battle schedule information
r = requests.get(
"https://redive.estertion.win/ver_log_redive/?page=1&filter=clan_battle"
).json()
# Get the Clan Battle start date
cb_start = r["data"][0]["clan_battle"][0]["start"]
cb_start = datetime.strptime(cb_start, "%Y/%m/%d %H:%M:%S")
# Get the Clan Battle end date
cb_end = r["data"][0]["clan_battle"][0]["end"]
cb_end = datetime.strptime(cb_end, "%Y/%m/%d %H:%M:%S")
# Number of days the Clan Battle runs
cb_days = (cb_end - cb_start).days + 1
return {"cb_start": cb_start, "cb_end": cb_end, "cb_days": cb_days}
|
683c9fe84bf346a1cce703063da8683d3469ccc2
| 3,639,255
|
def data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post(uuid, tapi_path_computation_routing_constraint=None): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_routing_constraint_post
creates tapi.path.computation.RoutingConstraint # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param tapi_path_computation_routing_constraint: tapi.path.computation.RoutingConstraint to be added to list
:type tapi_path_computation_routing_constraint: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_path_computation_routing_constraint = TapiPathComputationRoutingConstraint.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
7d56e6a544b2ac720aa311127aa5db9b3153a0c3
| 3,639,256
|
def A004086(i: int) -> int:
"""Digit reversal of i."""
result = 0
while i > 0:
unit = i % 10
result = result * 10 + unit
i = i // 10
return result
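A quick worked check of the reversal loop (illustrative values, not part of the original snippet):
assert A004086(1234) == 4321
assert A004086(100) == 1  # trailing zeros vanish on reversal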
|
b0a65b7e203b7a92f7d6a1846888798c369ac869
| 3,639,257
|
def should_raise_sequencingerror(wait, nrep, jump_to, goto, num_elms):
"""
Function to tell us whether a SequencingError should be raised
"""
if wait not in [0, 1]:
return True
if nrep not in range(0, 16384):
return True
if jump_to not in range(-1, num_elms+1):
return True
if goto not in range(0, num_elms+1):
return True
return False
|
fc7c4bdb29cd5b90faec59a4f6705b920304aae0
| 3,639,258
|
from typing import Optional
from typing import Mapping
import functools
def add_task_with_sentinels(
task_name: str,
num_sentinels: Optional[int] = 1):
"""Adds sentinels to the inputs/outputs of a task.
Adds num_sentinels sentinels to the end of 'inputs' and at the beginning
of 'targets'. This is known to help fine-tuning span corruption models,
especially on smaller datasets.
This will also rename the task by adding a "_{num_sentinels}_sentinel" suffix
to the task name, but making sure it comes before the following suffixes:
'_train', '_dev', '_test', '.'.
Example before:
'inputs': What is the capital of illinois?
'targets': Springfield.
Example after:
'inputs': What is the capital of illinois? <extra_id_0>
'targets': <extra_id_0> Springfield.
Args:
task_name: a str, which is the name of the task you want to have sentinels
added to. Note this will not override the current task, but will create
a new one.
num_sentinels: integer, number of sentinels to add to the end of inputs
and the beginning of targets.
"""
def _append_eos_after_trim_and_preserve(
dataset: tf.data.Dataset,
output_features: Mapping[str, dataset_providers.Feature],
sequence_length: Optional[Mapping[str, int]] = None,
preserve_final_n_tokens_when_trimming: Optional[int] = None
) -> tf.data.Dataset:
"""Version of append_eos_after_trim with option to preserve last n tokens."""
def _maybe_add_eos_and_trim(key: str, value: tf.Tensor) -> tf.Tensor:
if key not in output_features or not output_features[key].add_eos:
return value
eos_id = output_features[key].vocabulary.eos_id
if (sequence_length is not None and
sequence_length.get(key, None) is not None):
max_length = sequence_length[key]
if (preserve_final_n_tokens_when_trimming is not None and
preserve_final_n_tokens_when_trimming > 0):
# Compute the new length of the sequence excluding the EOS token.
trimmed_length = tf.minimum(max_length, tf.shape(value)[0] + 1)
# Can't preserve more tokens than the sequence length.
n_tokens_to_preserve = tf.minimum(
preserve_final_n_tokens_when_trimming, trimmed_length - 1)
# pylint: disable=invalid-unary-operand-type
return tf.concat(
[value[:trimmed_length-(n_tokens_to_preserve + 1)],
value[-n_tokens_to_preserve:],
[eos_id]], axis=0)
# pylint: enable=invalid-unary-operand-type
else:
return tf.concat([value[:max_length-1], [eos_id]], axis=0)
else:
return tf.concat([value, [eos_id]], axis=0)
return dataset.map(
lambda ex: {k: _maybe_add_eos_and_trim(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _create_new_task_name(task_name):
"""Creates the new task name with sentinels added."""
sentinel_name = '_{}_sentinel'.format(num_sentinels)
# Avoid messing up evaluation suffixes, so insert the sentinel name right
# before these keywords.
for suffix in ['_train', '_dev', '_test', '_eval', '.']:
idx = task_name.find(suffix)
if idx >= 0:
return task_name[:idx] + sentinel_name + task_name[idx:]
return task_name + sentinel_name
def _sentinel_id(vocabulary, sentinel_num=0):
"""Token ID to use as a sentinel.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
sentinel_num: an optional integer specifying which sentinel should be returned.
By default it returns the first sentinel.
Returns:
an integer
"""
return vocabulary.vocab_size - 1 - sentinel_num
def _add_sentinels(dataset, sequence_length, output_features):
"""Adds sentinels to end of inputs and beginning of targets."""
del sequence_length
input_vocab = output_features['inputs'].vocabulary
target_vocab = output_features['targets'].vocabulary
@utils.map_over_dataset
def _my_fn(x):
sentinels_input = [
_sentinel_id(input_vocab, idx) for idx in range(num_sentinels)]
sentinels_output = [
_sentinel_id(target_vocab, idx) for idx in range(num_sentinels)]
x['inputs'] = tf.concat([x['inputs'], sentinels_input], 0)
x['targets'] = tf.concat([sentinels_output, x['targets']], 0)
return x
return _my_fn(dataset)
def _postprocess_fn_remove_sentinel(string_label, *args, **kwargs):
"""If sentinels are appended to the task, then remove them before eval."""
del args
del kwargs
vocab = task.output_features['targets'].vocabulary
sentinel_str = vocab.decode(
[_sentinel_id(vocab, idx) for idx in range(num_sentinels)])
if string_label.startswith(sentinel_str):
string_label = string_label[len(sentinel_str):].strip()
return string_label
def _wrap_postprocess_fn_remove_sentinel(postprocess_fn):
"""Wrap around another postprocess_fn to remove sentinels first."""
def new_fn(string_label, *args, **kwargs):
string_label = _postprocess_fn_remove_sentinel(
string_label, *args, **kwargs)
return postprocess_fn(string_label, *args, **kwargs)
return new_fn
# Create the new task name.
task = TaskRegistry.get(task_name)
sentinel_task_name = _create_new_task_name(task_name)
# Make the new preprocessors that will insert sentinels and make sure
# sentinels are preserved if the sequences are trimmed.
new_preprocessors = list(task.preprocessors)
if new_preprocessors[-1] is seqio_preprocessors.append_eos_after_trim:
new_eos_function = functools.partial(
_append_eos_after_trim_and_preserve,
preserve_final_n_tokens_when_trimming=num_sentinels)
new_preprocessors[-1] = new_eos_function
new_preprocessors.insert(-1, _add_sentinels)
else:
new_preprocessors.append(_add_sentinels)
# Remove the inserted sentinels in the postprocessor.
postprocess_fn = task.postprocessor
if postprocess_fn is not None:
new_postprocess_fn = _wrap_postprocess_fn_remove_sentinel(postprocess_fn)
else:
new_postprocess_fn = _postprocess_fn_remove_sentinel
TaskRegistry.add(
sentinel_task_name,
source=task.source,
preprocessors=new_preprocessors,
output_features=task.output_features,
postprocess_fn=new_postprocess_fn,
metric_fns=task.metric_fns,
)
|
2d040f37d4346770e836c5a8b71b90c1acce9d1d
| 3,639,259
|
def mk_llfdi(data_id, data): # measurement group 10
"""
transforms a k-llfdi.json form into the triples used by insertMeasurementGroup to
store each measurement that is in the form
:param data_id: unique id from the json form
:param data: data array from the json form
:return: The list of (typeid,valType,value) triples that are used by insertMeasurementGroup to add the measurements
"""
val_list = [(220, 2, data_id),
(55, 7, data['f1']), (56, 7, data['f2']), (57, 7, data['f3']),
(58, 7, data['f4']), (59, 7, data['f5']), (60, 7, data['f6']),
(61, 7, data['f7']), (62, 7, data['f8']), (63, 7, data['f9']),
(64, 7, data['f10']), (65, 7, data['f11']), (66, 7, data['f12']),
(67, 7, data['f13']), (68, 7, data['f14']), (69, 7, data['f15']),
(70, 7, data['f16']), (71, 7, data['f17']), (72, 7, data['f18']),
(73, 7, data['f19']), (74, 7, data['f20']), (75, 7, data['f21']),
(76, 7, data['f22']), (77, 7, data['f23']), (78, 7, data['f24']),
(79, 7, data['f25']), (80, 7, data['f26']), (81, 7, data['f27']),
(82, 7, data['f28']), (83, 7, data['f29']), (84, 7, data['f30']),
(85, 7, data['f31']), (86, 7, data['f32'])]
optional_fields = ((87, 224, 'fd7'), (88, 225, 'fd8'), (89, 226, 'fd14'),
(90, 227, 'fd15'), (91, 228, 'fd26'), (92, 229, 'fd29'),
(93, 230, 'fd30'), (94, 231, 'fd32'))
for type_id, opt_id, field in optional_fields:
val_list.extend(lwh.mk_optional_int(type_id, opt_id, data, field))
return val_list
|
42717f4d182b3df60e27f213c36278c894597ded
| 3,639,261
|
def valid_distro(x):
"""
Validates that arg is a Distro instance whose "arch" and "variant"
attributes are non-empty strings.
:param x: object to validate
:return: True if valid, False otherwise
"""
if not isinstance(x, Distro):
return False
result = True
for required in ["arch", "variant"]:
val = getattr(x, required)
if not isinstance(val, str):
result = False
elif val.strip() == "":
result = False
return result
|
8fc68700a4d024b7ba756c186225ef22622db584
| 3,639,262
|
def encode(message):
"""
Encodes a string according to the Morse code table
>>> encode('MAI-PYTHON-2020') # doctest: +SKIP
'-- .- .. -....- .--. -.-- - .... --- -. -....- ..--- ----- ..--- -----'
>>> encode('SOS')
'... --- ...'
>>> encode('МАИ-ПИТОН-2020') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'М'
"""
encoded_signs = [
LETTER_TO_MORSE[letter] for letter in message
]
return ' '.join(encoded_signs)
|
efa312c510738f89608af0febff3435b17235eb8
| 3,639,264
|
def get_group_to_elasticsearch_processor():
"""
This processor adds groups to the Group Index if they don't exist in HQ
"""
return ElasticProcessor(
elasticsearch=get_es_new(),
index_info=GROUP_INDEX_INFO,
)
|
12e9371282298c96968263e76d1d02848fc5dcb3
| 3,639,265
|
import torch
from torch import nn
def loss_function(recon_x, x, mu, logvar, flattened_image_size = 1024):
"""
from https://github.com/pytorch/examples/blob/master/vae/main.py
"""
BCE = nn.functional.binary_cross_entropy(recon_x, x.view(-1, flattened_image_size), reduction='sum')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
|
73abe5c0944f646b4c9240fdb80e17cabf83a22d
| 3,639,266
|
import numpy as np
def remove_poly(values, poly_fit=0):
"""
Calculates best fit polynomial and removes it from the record
"""
x = np.linspace(0, 1.0, len(values))
cofs = np.polyfit(x, values, poly_fit)
y_cor = 0 * x
for co in range(len(cofs)):
mods = x ** (poly_fit - co)
y_cor += cofs[co] * mods
return values - y_cor
|
3699dcd3cae6021a5f2a0b4cad08882a4383d09c
| 3,639,267
|
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=device,
invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
|
a632fac96d555d3ce21d75183c00c6e7627ba5ac
| 3,639,268
|
def SogouNews(*args, **kwargs):
""" Defines SogouNews datasets.
The labels includes:
- 0 : Sports
- 1 : Finance
- 2 : Entertainment
- 3 : Automobile
- 4 : Technology
Create supervised learning dataset: SogouNews
Separately returns the training and test dataset
Args:
root: Directory where the datasets are saved. Default: ".data"
ngrams: a contiguous sequence of n items from a string text.
Default: 1
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
include_unk: include unknown token in the data (Default: False)
Examples:
>>> train_dataset, test_dataset = torchtext.datasets.SogouNews(ngrams=3)
"""
return _setup_datasets(*(("SogouNews",) + args), **kwargs)
|
e10eaf10ba6e999d40a40f09f7e79b47eb5aa8a5
| 3,639,270
|
def add_volume (activity_cluster_df,
activity_counts):
"""Scales log of session counts of each activity and merges into activities dataframe
Parameters
----------
activity_cluster_df : dataframe
Pandas dataframe of activities, skipgrams features, and cluster label from DBSCAN
activity_counts: dictionary
Dictionary (from activities.create_corpus func) of activity and session counts
Returns
-------
pandas dataframe of activities, skipgrams features, cluster label, and activity volume percentiles
"""
assert isinstance(activity_counts, dict), "activity_counts should be a dictionary."
assert len(activity_counts) >= len(activity_cluster_df), "activity_counts must contain the same number or more activity entries than activity_cluster_df."
# Map activities to capture unique session ID count in activities dataframe
activity_cluster_df['volume_pctl'] = activity_cluster_df.index.map(activity_counts)
# Replace absolute volume with percentile rank integer
activity_cluster_df['volume_pctl'] = activity_cluster_df['volume_pctl'].rank(pct=True) * 100
return activity_cluster_df
|
1ea67909e2c48500ca2f022a3ae5ebcbe28da6c8
| 3,639,271
|
def polyadd(c1, c2):
"""
Add one polynomial to another.
Returns the sum of two polynomials `c1` + `c2`. The arguments are
sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to high.
Returns
-------
out : ndarray
The coefficient array representing their sum.
See Also
--------
polysub, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
array([ 4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
|
0dc8327abf94126fca5bbcc836bc1c404c92148e
| 3,639,274
|
def weighted_categorical_crossentropy(target, output, n_classes = 3, axis = None, from_logits=False):
"""Categorical crossentropy between an output tensor and a target tensor.
Automatically computes the class weights from the target image and uses
them to weight the cross entropy
# Arguments
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
# Returns
Output tensor.
"""
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if axis is None:
axis = len(output.get_shape()) - 1
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output /= tf.reduce_sum(output,
axis=axis,
keep_dims=True)
# manual computation of crossentropy
_epsilon = _to_tensor(K.epsilon(), output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
target_cast = tf.cast(target, K.floatx())
class_weights = 1.0/float(n_classes)*tf.divide(tf.reduce_sum(target_cast), tf.reduce_sum(target_cast, axis = [0,1,2]))
print(class_weights.get_shape())
return - tf.reduce_sum(tf.multiply(target * tf.log(output), class_weights), axis=axis)
else:
raise Exception("weighted_categorical_crossentropy cannot take logits")
|
e7fe2c583b4158afe5c04632c53402af1c64cc20
| 3,639,275
|
from django.conf import settings
def get_config(key, default):
"""
Get the dictionary "IMPROVED_PERMISSIONS_SETTINGS"
from the settings module.
Return "default" if "key" is not present in
the dictionary.
"""
config_dict = getattr(settings, 'IMPROVED_PERMISSIONS_SETTINGS', None)
if config_dict:
if key in config_dict:
return config_dict[key]
return default
|
8e4d03b71f568e6c3450e6674d16624ae44181a8
| 3,639,276
|
def prefetched_iterator(query, chunk_size=2000):
"""
This is a prefetch_related-safe version of what iterator() should do.
It will sort and batch on the default django primary key
Args:
query (QuerySet): the django queryset to iterate
chunk_size (int): the size of each chunk to fetch
"""
# walk the records in ascending id order
base_query = query.order_by("id")
def _next(greater_than_id):
"""Returns the next batch"""
return base_query.filter(id__gt=greater_than_id)[:chunk_size]
batch = _next(0)
while batch:
item = None
# evaluate each batch query here
for item in batch:
yield item
# next batch starts after the last item.id
batch = _next(item.id) if item is not None else None
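A minimal usage sketch, assuming a hypothetical Django model Book with a prefetched relation; batching on ascending id keeps each prefetch_related cache bounded to chunk_size rows:
# Hypothetical model and handler, shown only to illustrate the call pattern.
query = Book.objects.prefetch_related("authors")
for book in prefetched_iterator(query, chunk_size=500):
    handle(book)  # each 500-row batch is fetched (and prefetched) as one unit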
|
e8a8feeea8073161283018f19de742c9425e2f94
| 3,639,278
|
def dicom_strfname(names: tuple) -> str:
"""
Join name components into a DICOM person name: ('DOE', 'JOHN', 'S') -> 'DOE^JOHN^S'
"""
return "^".join(names)
|
864ad0d4c70c9bb4acbc65c92bf83a97415b9d35
| 3,639,280
|
import json
def plot_new_data(logger):
"""
Plots mixing ratio data, creating plot files and queueing the files for upload.
This will plot the data regardless of whether there's any new data, since it's not run continuously.
:param logger: logging logger to record to
:return: bool, True if it ran correctly, False if it exited on an error
"""
logger.info('Running plot_new_data()')
try:
engine, session = connect_to_db(DB_NAME, CORE_DIR)
except Exception as e:
logger.error(f'Error {e.args} prevented connecting to the database in plot_new_data()')
return False
remotedir = BOULDAIR_BASE_PATH + '/MR_plots'
compounds_to_plot = (session.query(Quantification.name)
.join(Standard, Quantification.standard_id == Standard.id)
.filter(Standard.name == 'quantlist').all())
compounds_to_plot[:] = [q.name for q in compounds_to_plot]
date_limits, major_ticks, minor_ticks = create_monthly_ticks(6, days_per_minor=7)
with open(JSON_PUBLIC_DIR / 'zug_plot_info.json', 'r') as file:
compound_limits = json.loads(file.read())
for name in compounds_to_plot:
params = (GcRun.date, Compound.mr)
filters = (
Compound.name == name,
GcRun.date >= date_limits['left'],
*ambient_filters
)
results = abstract_query(params, filters, GcRun.date)
dates = [r.date for r in results]
mrs = [r.mr for r in results]
p = MixingRatioPlot(
{name: (dates, mrs)},
limits={**date_limits, **compound_limits[name]},
major_ticks=major_ticks,
minor_ticks=minor_ticks,
filepath=MR_PLOT_DIR / f'{name}_plot.png'
)
p.plot()
file_to_upload = FileToUpload(p.filepath, remotedir, staged=True)
add_or_ignore_plot(file_to_upload, session)
session.commit()
session.close()
engine.dispose()
return True
|
186b11d496c8b1097087f451e43d235b40d7a2ba
| 3,639,281
|
def plot_graphs(graphs=compute_graphs()):
""" Affiche les graphes avec la bibliothèque networkx """
GF, Gf = graphs
pos = {1: (2, 1), 2: (4, 1), 3: (5, 2), 4: (4, 3), 5: (1, 3), 6: (1, 2), 7: (3, 4)}
plt.figure(1)
nx.draw_networkx_nodes(GF, pos, node_size=500)
nx.draw_networkx_labels(GF, pos)
nx.draw_networkx_edges(GF, pos, arrows=True)
plt.title("Graphe fort")
plt.show() # display
plt.figure(2)
nx.draw_networkx_nodes(Gf, pos, node_size=500)
nx.draw_networkx_labels(Gf, pos)
nx.draw_networkx_edges(Gf, pos, arrows=True, style="dashed")
plt.title("Graphe faible")
plt.show() # display
return GF, Gf
|
4db21b3f5a823b5a7a17264a611435d2aa3825a4
| 3,639,282
|
def get_polygon_name(polygon):
"""Returns the name for a given polygon.
Since not all polygons store their name in the same field, we have to figure
out what type of polygon it is first, then reference the right field.
Args:
polygon: The polygon object to get the name from.
Returns:
The name for that polygon object.
"""
if isinstance(polygon, StatePolygon):
name = polygon.name
elif isinstance(polygon, CountyPolygon):
if polygon.geo_code < 10000000:
name = polygon.name[:-5]
else:
name = polygon.name + ' County'
elif isinstance(polygon, PumaPolygon):
name = polygon.puma_name[:-5]
return name
|
da89efece12fbb27a5ceafef83b73ade392644cb
| 3,639,283
|
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return redirect("/login")
# Ensure password was submitted
elif not request.form.get("password"):
return redirect("/login")
# Query database for username
username = request.form.get("username")
rows = list(db.execute(f"SELECT * FROM users WHERE name = '{username}'"))
# Ensure username exists and password is correct
pass_string = request.form.get("password")
if len(rows) != 1 or not check_password_hash(rows[0][2], pass_string):
return redirect("/login")
# Remember which user has logged in
session["user_id"] = rows[0][0]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
|
8699a3f0f162706c2e0a0ab9565b8b595cbb7574
| 3,639,284
|
from . import paval as pv
import configparser
def read_option(file_path, section, option, fallback=None):
"""
Parse config file and read out the value of a certain option.
"""
try:
# For details see the notice in the header
pv.path(file_path, "config", True, True)
pv.string(section, "section string")
pv.string(option, "option string")
except NameError:
pass
c = configparser.RawConfigParser()
c.read(file_path)
value = ""
try:
value = c.get(section, option)
except configparser.NoSectionError:
if fallback:
return str(fallback)
else:
raise Exception("This section does not exist in the given " \
"config file.")
except configparser.NoOptionError:
if fallback:
return str(fallback)
else:
raise Exception("This option does not exist in the given " \
"section.")
return str(value)
|
6a9b839e36509630813c3cab5e45402b37377837
| 3,639,285
|
def msg_to_json(msg: Msg) -> dict:
"""Convert message to JSON-serializable data"""
return {'facility': msg.facility.name,
'severity': msg.severity.name,
'version': msg.version,
'timestamp': msg.timestamp,
'hostname': msg.hostname,
'app_name': msg.app_name,
'procid': msg.procid,
'msgid': msg.msgid,
'data': msg.data,
'msg': msg.msg}
|
ee01821bdbcdcbe88f5c63f0a1f22d050814aa7f
| 3,639,287
|
def get_direct_dependencies(definitions_by_node: Definitions, node: Node) -> Nodes:
"""Get direct dependencies of a node"""
dependencies = {node}
def traverse_definition(definition: Definition):
"""Traverses a definition and adds them to the dependencies"""
for dependency in definition['nodes']:
if dependency not in dependencies:
dependencies.add(dependency)
for children_definition in definition['children_definitions']:
traverse_definition(children_definition)
traverse_definition(definitions_by_node[node])
dependencies.discard(node)
return dependencies
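A minimal sketch of the expected definitions structure (a hypothetical two-level table), showing that children definitions are traversed and the node itself is excluded:
defs = {
    "a": {"nodes": ["b"], "children_definitions": [
        {"nodes": ["c"], "children_definitions": []},
    ]},
}
assert get_direct_dependencies(defs, "a") == {"b", "c"}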
|
6dfbfd9068ecc3759764b3542be62f270c45e4c1
| 3,639,288
|
def get_timeseries_metadata(request, file_type_id, series_id, resource_mode):
"""
Gets metadata html for the aggregation type (logical file type)
:param request:
:param file_type_id: id of the aggregation (logical file) object for which metadata in html
format is needed
:param series_id: id of the time series for which metadata is to be displayed
:param resource_mode: a value of either edit or view. In resource edit mode metadata html
form elements are returned. In view mode normal html for display of metadata is returned
:return: json data containing html string
"""
if resource_mode != "edit" and resource_mode != 'view':
err_msg = "Invalid metadata type request."
ajax_response_data = {'status': 'error', 'message': err_msg}
return JsonResponse(ajax_response_data, status=status.HTTP_400_BAD_REQUEST)
logical_file, json_response = _get_logical_file("TimeSeriesLogicalFile", file_type_id)
if json_response is not None:
return json_response
series_ids = logical_file.metadata.series_ids_with_labels
if series_id not in series_ids.keys():
# this will happen only in case of CSV file upload when data is written
# first time to the blank sqlite file as the series ids get changed to
# uuids
series_id = list(series_ids.keys())[0]
try:
if resource_mode == 'view':
metadata = logical_file.metadata.get_html(series_id=series_id)
else:
metadata = logical_file.metadata.get_html_forms(series_id=series_id)
ajax_response_data = {'status': 'success', 'metadata': metadata}
except Exception as ex:
ajax_response_data = {'status': 'error', 'message': str(ex)}
return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
|
056707f6bd1947dd227c61dccb99b4f9d46ce9c9
| 3,639,289
|
def standardize(tag):
"""Put an order-numbering ID3 tag into our standard form.
This function does nothing when applied to a non-order-numbering tag.
Args:
tag: A mutagen ID3 tag, which is modified in-place.
Returns:
A 2-tuple with the decoded version of the order string.
Raises:
BadOrderError: if the tag is obviously bad.
"""
if not _is_order_tag(tag):
return
tag.text[0] = standardize_str(tag.text[0])
return decode(tag.text[0])
|
66edb2f402e2781deaf39ae470b5f3c54411c1c3
| 3,639,290
|
from subprocess import run, PIPE, DEVNULL
def _count_objects(osm_pbf):
"""Count objects of each type in an .osm.pbf file."""
p = run(["osmium", "fileinfo", "-e", osm_pbf], stdout=PIPE, stderr=DEVNULL)
fileinfo = p.stdout.decode()
n_objects = {"nodes": 0, "ways": 0, "relations": 0}
for line in fileinfo.split("\n"):
for obj in n_objects:
if f"Number of {obj}" in line:
n_objects[obj] = int(line.split(":")[-1])
return n_objects
|
f3792b457e3cc922b6df3cef69dfb4c8d00c68d9
| 3,639,291
|
import numpy as np
def combine_multi_uncertainty(unc_lst):
"""Combines uncertainty values from more than two sources (root sum of squares)"""
return np.sqrt(float(sum(u ** 2 for u in unc_lst)))
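A worked check of the root-sum-of-squares combination: sqrt(0.1**2 + 0.2**2 + 0.2**2) = sqrt(0.09) = 0.3:
assert abs(combine_multi_uncertainty([0.1, 0.2, 0.2]) - 0.3) < 1e-12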
|
6f06afc7bda7d65b8534e7294411dbe5e499b755
| 3,639,292
|
def export_performance_df(
dataframe: pd.DataFrame, rule_name: str = None, second_df: pd.DataFrame = None, relationship: str = None
) -> pd.DataFrame:
"""
Function used to calculate portfolio performance for data after calculating a trading signal/rule and relationship.
"""
if rule_name is not None:
if rule_name in algorithm_functions["infertrade"]["allocation"].keys():
used_calculation = calculate_infertrade_allocation
elif rule_name in algorithm_functions["ta"]["signal"].keys():
used_calculation = calculate_ta_signal
elif rule_name in algorithm_functions["ta"]["allocation"].keys():
used_calculation = calculate_ta_allocation
elif rule_name in algorithm_functions["infertrade"]["signal"].keys():
used_calculation = calculate_infertrade_signal
elif rule_name in ta_export_regression_allocations.keys():
used_calculation = calculate_ta_regression_allocation
else:
raise ValueError("Algorithm not found")
df_with_performance = used_calculation(dataframe=dataframe, rule_name=rule_name)
else:
df_with_performance = dataframe
if relationship is not None:
if second_df is not None:
if rule_name is not None:
second_df_with_performance = used_calculation(dataframe=second_df, rule_name=rule_name)
else:
second_df_with_performance = second_df
second_df_with_relationship = calculate_infertrade_allocation(
dataframe=second_df_with_performance, rule_name=relationship
)
df_with_relationship = calculate_infertrade_allocation(
dataframe=df_with_performance, rule_name=relationship
)
complete_relationship = df_with_relationship.append(second_df_with_relationship, ignore_index=False)
return complete_relationship
else:
df_with_relationship = calculate_infertrade_allocation(
dataframe=df_with_performance, rule_name=relationship
)
return df_with_relationship
else:
return df_with_performance
|
e0587a658aab2e629bff7c307e5f1aaec63a80fe
| 3,639,293
|
def attention(x, scope, n_head, n_timesteps):
"""
perform multi-head qkv dot-product attention and linear project result
"""
n_state = x.shape[-1].value
with tf.variable_scope(scope):
queries = conv1d(x, 'q', n_state)
keys = conv1d(x, 'k', n_state)
values = conv1d(x, 'v', n_state)
# note that split/merge heads is fused into attention ops (no reshape/transpose needed)
bst = get_blocksparse_attention_ops(n_timesteps, n_head)
attention_energies = bst.query_key_op(queries, keys)
attention_weights = bst.masked_softmax(attention_energies, scale=tf.rsqrt(n_state / n_head))
weighted_values = bst.weight_value_op(attention_weights, values)
result = conv1d(weighted_values, 'proj', n_state)
return result
|
63456ce40c4e72339638f460a8138dcd143e7352
| 3,639,294
|
def std_ver_minor_inst_valid_possible(std_ver_minor_uninst_valid_possible): # pylint: disable=redefined-outer-name
"""Return an instantiated IATI Version Number."""
return iati.Version(std_ver_minor_uninst_valid_possible)
|
9570918df11a63faf194da9db82aa4ea1745c920
| 3,639,295
|
def sequence_loss_by_example(logits, targets, weights,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with tf.name_scope( name,
"sequence_loss_by_example",logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
target = tf.reshape(target, [-1])
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target, logits=logit)
else:
crossent = softmax_loss_function(logit, target)
log_perp_list.append(crossent * weight)
log_perps = tf.add_n(log_perp_list)
if average_across_timesteps:
total_size = tf.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
|
adf8a063c6f41b41e174852466489f535c7e0761
| 3,639,296
|
def skip(line):
"""Returns true if line is all whitespace or shebang."""
stripped = line.lstrip()
return stripped == '' or stripped.startswith('#!')
|
4ecfb9c0f2d497d52cc9d9e772e75d042cc0bcce
| 3,639,297
|
def get_dss_client(deployment_stage: str):
"""
Returns appropriate DSSClient for deployment_stage.
"""
dss_env = MATRIX_ENV_TO_DSS_ENV[deployment_stage]
if dss_env == "prod":
swagger_url = "https://dss.data.humancellatlas.org/v1/swagger.json"
else:
swagger_url = f"https://dss.{dss_env}.data.humancellatlas.org/v1/swagger.json"
logger.info(f"ETL: Hitting DSS with Swagger URL: {swagger_url}")
dss_config = hca.HCAConfig()
dss_config['DSSClient'] = {}
dss_config['DSSClient']['swagger_url'] = swagger_url
client = hca.dss.DSSClient(config=dss_config)
return client
|
4e260b37c6f74261362cc10b77b3b28d1464d49d
| 3,639,298
|
def bounce_off(bounce_obj_rect: Rect, bounce_obj_speed,
hit_obj_rect: Rect, hit_obj_speed):
"""
The alternative version of `bounce_off_ip`. The function returns the result
instead of updating the value of `bounce_obj_rect` and `bounce_obj_speed`.
@return A tuple (`new_bounce_obj_rect`, `new_bounce_obj_speed`)
"""
new_bounce_obj_rect = bounce_obj_rect.copy()
new_bounce_obj_speed = bounce_obj_speed.copy()
bounce_off_ip(new_bounce_obj_rect, new_bounce_obj_speed,
hit_obj_rect, hit_obj_speed)
return new_bounce_obj_rect, new_bounce_obj_speed
|
84b038c05f5820065293ba90b73497f0d1e7a7b9
| 3,639,299
|
def second(lst):
"""Same as first(nxt(lst)).
"""
return first(nxt(lst))
|
aa49e089a06a4b3e7d781966d8b4f98b7fe15841
| 3,639,301
|
import cv2
import numpy as np
from PIL import Image
def gaussian_noise(height, width):
"""
Create a background with Gaussian noise (to mimic paper)
"""
# We create an all white image
image = np.ones((height, width)) * 255
# Fill the array with Gaussian noise (mean 235, std 10); cv2.randn overwrites rather than adds
cv2.randn(image, 235, 10)
return Image.fromarray(image).convert("RGBA")
|
6243fde57b3e7415edc2024eebbe10f059b93a55
| 3,639,302
|
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 1, x1:x2] = color
image[y2:y2 + 1, x1:(x2+1)] = color
image[y1:y2, x1:x1 + 1] = color
image[y1:y2, x2:x2 + 1] = color
return image
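A minimal sketch on a blank RGB array (numpy assumed available), drawing a red one-pixel outline:
import numpy as np
img = np.zeros((20, 20, 3), dtype=np.uint8)
img = draw_box(img, (2, 2, 7, 7), [255, 0, 0])  # box is (y1, x1, y2, x2)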
|
4d1e713c6cb6a3297b4f7d8ab9682205947770da
| 3,639,303
|
def get_statuses_one_page(weibo_client, max_id=None):
"""获取一页发布的微博
"""
if max_id:
statuses = weibo_client.statuses.user_timeline.get(max_id=max_id)
else:
statuses = weibo_client.statuses.user_timeline.get()
return statuses
|
4a214489aa5696c9683c9cfa96d79ee169135eb5
| 3,639,304
|
def do_nothing(ax):
"""Do not add any watermark."""
return ax
|
6fbe32dc45ca1a945e1c45bf0319770c4d683397
| 3,639,305
|
def exec_lm_pipe(taskstr):
"""
Input: taskstr contains LM calls separated by ;
Used for execute config callback parameters (IRQs and BootHook)
"""
try:
# Handle config default empty value (do nothing)
if taskstr.startswith('n/a'):
return True
# Execute individual commands - msgobj->"/dev/null"
for cmd in (cmd.strip().split() for cmd in taskstr.split(';')):
if not exec_lm_core_schedule(cmd):
console_write("|-[LM-PIPE] task error: {}".format(cmd))
except Exception as e:
console_write("[IRQ-PIPE] error: {}\n{}".format(taskstr, e))
errlog_add('exec_lm_pipe error: {}'.format(e))
return False
return True
|
8854b5de0f408caf9292aecbcfa261744166e744
| 3,639,306
|
def term_size():
"""Print out a sequence of ANSI escape code which will report back the
size of the window.
"""
# ESC 7 - Save cursor position
# ESC 8 - Restore cursor position
# ESC [r - Enable scrolling for entire display
# ESC [row;colH - Move to cursor position
# ESC [6n - Device Status Report - send ESC [row;colR
repl = None
if 'repl_source' in dir(pyb):
repl = pyb.repl_source()
if repl is None:
repl = pyb.USB_VCP()
repl.send(b'\x1b7\x1b[r\x1b[999;999H\x1b[6n')
pos = b''
while True:
char = repl.recv(1)
if char == b'R':
break
if char != b'\x1b' and char != b'[':
pos += char
repl.send(b'\x1b8')
(height, width) = [int(i, 10) for i in pos.split(b';')]
return height, width
|
bc0b09163b48f821315f52c52b0a58b6b5fb977a
| 3,639,307
|
def get_dashboard(request, project_id):
"""
Load Project Dashboard to display Latest Cost Estimate and List of Changes
"""
project = get_object_or_404(Project, id=project_id)
# required to determine permission of user,
# if not a project user then project owner
try:
project_user = ProjectUser.objects.get(
project=project, project_user=request.user)
except ProjectUser.DoesNotExist:
project_user = None
form = ChangeForm()
attachmentsForm = ChangeAttachmentsForm()
changes = Change.objects.filter(project_id=project_id)
# Calculations to display on dashboard
original_estimate = project.original_estimate
def _sum_changes(status):
    """Total change_cost of this project's changes with the given status."""
    total = Change.objects.filter(
        project_id=project_id, change_status=status).aggregate(
        Sum('change_cost'))['change_cost__sum']
    return total if total is not None else 0
accepted_changes = _sum_changes("A")
pending_changes = _sum_changes("P")
wip_changes = _sum_changes("WiP")
rejected_changes = _sum_changes("R")
subtotal = original_estimate + accepted_changes
total = subtotal + pending_changes + wip_changes
context = {
'project': project,
'project_user': project_user,
'form': form,
'attachmentsForm': attachmentsForm,
'changes': changes,
'original_estimate': original_estimate,
'accepted_changes': accepted_changes,
'pending_changes': pending_changes,
'wip_changes': wip_changes,
'rejected_changes': rejected_changes,
'subtotal': subtotal,
'total': total,
}
return render(request, 'dashboard/project.html', context)
|
36257741b2ef220d35e4593bd080a82b4cc743a0
| 3,639,308
|
def _scan_real_end_loop(bytecode, setuploop_inst):
"""Find the end of loop.
Return the instruction offset.
"""
start = setuploop_inst.next
end = start + setuploop_inst.arg
offset = start
depth = 0
while offset < end:
inst = bytecode[offset]
depth += inst.block_effect
if depth < 0:
return inst.next
offset = inst.next
|
9cff8ab77563a871b86cdbb14236603ec58e04b6
| 3,639,309
|
def six_node_range_5_to_0_bst():
"""Six nodes covering range five to zero."""
b = BST([5, 4, 3, 2, 1, 0])
return b
|
1afe6c613b03def6dc9d8aed41624e40180e5ae5
| 3,639,310
|
def IndividualsInAlphabeticOrder(filename):
"""Checks if the names are in alphabetic order"""
with open(filename, 'r') as f:
lines = f.readlines()
individual_header = '# Individuals:\n'
if individual_header in lines:
individual_authors = lines[lines.index(individual_header) + 1:]
sorted_authors = sorted(individual_authors, key=str.casefold)
if sorted_authors == individual_authors:
print("Individual authors are sorted alphabetically.")
return True
else:
print("Individual authors are not sorted alphabetically."
" The expected order is:")
print(''.join(sorted_authors))
return False
else:
print("Cannot find line '# Individuals:' in file.")
return False
|
4753bbf41498373695f921555c8f01183dbb58dc
| 3,639,311
|
import mxnet
from mxnet.gluon.data.vision import transforms
from PIL import Image
def preprocess_img_imagenet(img_path):
"""Preprocessing required for ImageNet classification.
Reference:
https://github.com/onnx/models/tree/master/vision/classification/vgg
"""
img = Image.open(img_path)
img = mxnet.ndarray.array(img)
transform_fn = transforms.Compose(
[
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
img = transform_fn(img)
img = img.expand_dims(axis=0) # Batchify.
return img.asnumpy()
|
f181e3376f26ee14c6314a8a730e796eefb09e2e
| 3,639,312
|
import bpy
def create_lambertian(color):
"""
create a Lambertian material
"""
material = bpy.data.materials.new(name="Lambertian")
material.use_nodes = True
nodes = material.node_tree.nodes
# remove principled
material.node_tree.nodes.remove(
material.node_tree.nodes.get('Principled BSDF'))
# get material output
material_output = material.node_tree.nodes.get('Material Output')
# Add a diffuse shader and set its location:
diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.inputs['Color'].default_value = color
# link diffuse shader to material
material.node_tree.links.new(
material_output.inputs[0], diffuse_node.outputs[0])
return material
|
e291817853ec26d6767d8fd496ee5ced15ff87f2
| 3,639,313
|
def submission_view(request, locker_id, submission_id):
"""Displays an individual submission"""
submission = get_object_or_404(Submission, pk=submission_id)
newer = submission.newer()
newest = Submission.objects.newest(submission.locker)
if not newest:
newest = submission
oldest = Submission.objects.oldest(submission.locker)
if not oldest:
oldest = submission
older = submission.older()
discussion_enabled = submission.locker.discussion_enabled()
is_owner = submission.locker.owner == request.user
users_discussion = submission.locker.discussion_users_have_access()
users_workflow = submission.locker.workflow_users_can_edit()
workflow_enabled = submission.locker.workflow_enabled()
# generate a message to the user if the submission is deleted
if submission.deleted:
messages.warning(request,
u'<strong>Heads up!</strong> This submission has '
u'been deleted and <strong>will be permanently '
u'removed</strong> from the locker '
u'<strong>{}</strong>.'
u''.format(naturaltime(submission.purge_date)))
return render(request, 'datalocker/submission_view.html', {
'data': submission.data_dict(with_types=True),
'discussion_enabled': discussion_enabled,
'discussion_users_have_access': users_discussion or is_owner,
'newer': newer,
'newer_disabled': submission.id == newer.id,
'newest': newest,
'newest_disabled': submission.id == newest.id,
'older': older,
'older_disabled': submission.id == older.id,
'oldest': oldest,
'oldest_disabled': submission.id == oldest.id,
'sidebar_enabled': workflow_enabled or discussion_enabled,
'submission': submission,
'workflow_enabled': workflow_enabled,
'workflow_states': submission.locker.workflow_states(),
'workflow_state': submission.workflow_state,
'workflow_users_can_edit': users_workflow or is_owner,
})
|
f473c7ad2c59dfd27a96fa4478f6b9652e740296
| 3,639,314
|
from pathlib import Path
def add_filename_suffix(file_path: str, suffix: str) -> str:
"""
Append a suffix at the filename (before the extension).
Args:
file_path: str. The path whose filename we would like to suffix
suffix: The suffix to add
Returns: path with suffix appended at the end of the filename and before extension
"""
path = Path(file_path)
return str(path.parent.joinpath(path.stem + suffix).with_suffix(path.suffix))
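A worked example of the suffix insertion (POSIX-style path shown; str(Path(...)) keeps the check OS-portable):
assert add_filename_suffix("data/report.csv", "_clean") == str(Path("data/report_clean.csv"))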
|
546bb95f694ee5d5cb26873428fcac8453df6a54
| 3,639,315
|
def list_dropdownTS(dic_df):
"""
Takes a dictionary specifying which variables to use and how to clean them.
Returns a list of the possible choices; this list is used to populate a
dropdown menu in the eventHandler function.
"""
l_choice = []
for value_cat in dic_df['var_continuous'].values():
l_choice.append(value_cat['name'])
l_choice = ['-'] + l_choice
return l_choice
|
fcd0474fa6941438cb39c63aa7605f1b776fd538
| 3,639,316
|
import itertools
import random
import numpy as np
def get_voice_combinations(**kwargs):
"""
Gets k possible combinations of voices from a list of voice indexes. If k is None, it will return all possible
combinations. The combinations are of a minimum size min_n_voices_to_remove and a max size
max_n_voices_to_remove. When choosing a k number a combinations from all possible combinations, the probability
of choosing a combination of a number of voices above another can be passed with the prob list, where for a range
of voices to remove from 1 to 3, [1, 1, 1] indicates equal probability, [1,1,2] indicates that combinations with
3 voices have double probability of getting chosen, etc.
@param kwargs: see below
@return voice_idx_comb: combinations of voice indexes
"""
# list of voices to remove
voice_idx = kwargs.get("voice_idx", [0, 1, 2, 3, 4])
min_n_voices_to_remove = kwargs.get(
"min_n_voices_to_remove", 1) # min size of the combination
max_n_voices_to_remove = kwargs.get(
"max_n_voices_to_remove", 3) # max size of the combination
# prob of each n_voices_to_remove set in ascending order
prob = kwargs.get("prob", [1, 1, 1])
k = kwargs.get("k", 5) # max number of combinations to return
if len(voice_idx) < max_n_voices_to_remove:
max_n_voices_to_remove = len(voice_idx)
range_items = range(min_n_voices_to_remove, max_n_voices_to_remove + 1)
assert (len(prob) == len(
range_items)), "The prob list must be the same length as the range(min_n_voices_to_remove, max_n_voices_to_remove)"
voice_idx_comb = []
weights = []
for i, n_voices_to_remove in enumerate(range_items):
_voice_idx_comb = list(itertools.combinations(
voice_idx, n_voices_to_remove))
voice_idx_comb.extend(_voice_idx_comb)
_weights = list(np.repeat(prob[i], len(_voice_idx_comb)))
weights.extend(_weights)
if k is not None: # if there is no k, return all possible combinations
voice_idx_comb = random.choices(voice_idx_comb, weights=weights, k=k)
return list(voice_idx_comb)
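A minimal call sketch with hypothetical arguments: three voices, combinations of size 1 or 2, pairs twice as likely as singles, four draws:
combos = get_voice_combinations(voice_idx=[0, 1, 2],
                                min_n_voices_to_remove=1,
                                max_n_voices_to_remove=2,
                                prob=[1, 2], k=4)  # e.g. [(0,), (1, 2), (0, 2), (2,)]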
|
d3addbfe5023b5ee6e25f190c53b469593bb9ff4
| 3,639,317
|
def data(request):
"""This is a the main entry point to the Data tab."""
context = cache.get("data_tab_context")
if context is None:
context = data_context(request)
cache.set("data_tab_context", context, 29)
return render(request, "rundb/data/data.html", context)
|
2763617afc7d865acaf3f0dcbf9190bd084ad5ae
| 3,639,318
|
import typing
import pathlib
import pickle
def from_pickle(
filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
) -> typing.Union[Categorization, HierarchicalCategorization]:
"""De-serialize Categorization or HierarchicalCategorization from a file written by
to_pickle.
Note that this uses the pickle module, which executes arbitrary code in the
provided file. Only load from pickle files that you trust."""
try:
spec = pickle.load(filepath)
except TypeError:
with open(filepath, "rb") as fd:
spec = pickle.load(fd)
return from_spec(spec)
|
e268f8c1467965bbba47c65ebba5f021171fc6ce
| 3,639,320
|
def recostruct(encoded, weights, bias):
"""
Reconstructor : Encoded -> Original
Iterates the weight matrices in reverse without mutating the caller's list
"""
for i, item in enumerate(reversed(weights)):
encoded = encoded @ item.eval() + bias[i].eval()
return encoded
|
e17aeb6a819a6eec745c5dd811460049fa4a92cd
| 3,639,321
|
import math
def get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF,Verbose=True):#get_file_var_from_htmid in Eran's library
"""Description: given a catalog basename and the index of a trixel and the number of trixels in an HDF5 file,
create the trixel dataset name
Input :- CatName
- index
- NfilesinHDF: number of datasets in an HDF5 files (default is 100)
Output :- Filename: name of the HDF5 file where the trixel_dataset is stored
- Datasetname: name of the trixel_dataset
example:
By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018"""
if Verbose:
print('index is', index)
num_file=math.floor(index/NfilesinHDF)*NfilesinHDF #equivalent to index//Nfiles*Nfiles
Filename='%s_htm_%06d.hdf5' % (CatName, num_file)
DatasetName='htm_%06d' % index
return Filename,DatasetName
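A worked example of the naming arithmetic for a hypothetical catalog name: floor(12345/100)*100 = 12300, so index 12345 lands in file ..._012300 under dataset htm_012345:
fname, dset = get_file_dataset_from_trixel_id('GAIA', 12345, 100, Verbose=False)
assert (fname, dset) == ('GAIA_htm_012300.hdf5', 'htm_012345')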
|
b9d0482780ae2a191175f1549513f46c047bb1cf
| 3,639,322
|
def calc_element_column(NH, fmineral, atom, mineral, d2g=0.009):
"""
Calculate the column density of an element for a particular NH value,
assuming a dust-to-gas ratio (d2g) and the fraction of dust in that
particular mineral species (fmineral)
"""
dust_mass = NH * mp * d2g * fmineral # g cm^{-2}
print('Dust mass = %.3e g cm^-2' % (dust_mass))
return calc_mass_conversion(atom, mineral) * dust_mass
|
d1e24602e6d329132d59f300543f306502867fc1
| 3,639,323
|
def output_dot(sieve, column_labels=None, max_edges=None, filename='structure.dot'):
""" A network representation of the structure in Graphviz format. Units in the produced file
are in bits. Weight is the mutual information and tc is the total correlation.
"""
print """Compile by installing graphviz and running a command like:
sfdp %s -Tpdf -Earrowhead=none -Nfontsize=12 \\
-GK=2 -Gmaxiter=1000 -Goverlap=False -Gpack=True \\
-Gpackmode=clust -Gsep=0.02 -Gratio=0.7 -Gsplines=True -o structure.pdf""" % filename
if column_labels is None:
column_labels = map(unicode, range(sieve.n_variables))
else:
column_labels = map(unicode, column_labels)
f = open(filename, 'w')
f.write('strict digraph {\n'.encode('utf-8'))
for i, column_label in enumerate(column_labels):
line = '%s [label="%s", shape=none]\n' % ('X_' + column_label, column_label)
f.write(line.encode('utf-8'))
for j, layer in enumerate(sieve.layers):
this_tc = 0.6 * sieve.tcs[j] / np.max(sieve.tcs)
line = 'Y_%d [shape=circle,margin="0,0",style=filled,fillcolor=black,' \
'fontcolor=white,height=%0.3f,label=Y%d,tc=%0.3f]\n' % (j, this_tc, j+1, sieve.tcs[j] / np.log(2))
f.write(line)
mis = sieve.mis
print('mis', mis)
if max_edges is None or max_edges > mis.size:
w_threshold = 0.
else:
w_threshold = -np.sort(-np.ravel(mis))[max_edges]
for j, layer in enumerate(sieve.layers):
for i in range(sieve.n_variables):
w = mis[j, i] / np.log(2)
if w > w_threshold:
line = '%s -> %s [penwidth=%0.3f, weight=%0.3f];\n' % ('X_'+str(i), 'Y_'+str(j), 2 * w, w)
f.write(line)
for j2 in range(0, j):
w = mis[j, sieve.n_variables + j2] / np.log(2)
if w > w_threshold:
line = '%s -> %s [penwidth=%0.3f, weight=%0.3f];\n' % ('Y_'+str(j2), 'Y_'+str(j), 2 * w, w)
f.write(line)
f.write('}')
f.close()
return True
|
aa63e5ffb0bd1544f29391821db9ac49e690e3fe
| 3,639,324
|
import numpy as np
def projectSimplex_vec(v):
""" project vector v onto the probability simplex
Parameter
---------
v: shape(nVars,)
input vector
Returns
-------
w: shape(nVars,)
projection of v onto the probability simplex
"""
nVars = v.shape[0]
mu = np.sort(v,kind='quicksort')[::-1]
sm_hist = np.cumsum(mu)
flag = (mu - 1./np.arange(1,nVars+1)*(sm_hist-1) > 0)
lastTrue = len(flag) - 1 - flag[::-1].argmax()
sm_row = sm_hist[lastTrue]
theta = 1./(lastTrue+1) * (sm_row - 1)
w = np.maximum(v-theta, 0.)
return w
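A quick worked check: projecting [0.4, 2.0] onto the probability simplex yields [0.0, 1.0], which sums to one (numpy as imported above):
w = projectSimplex_vec(np.array([0.4, 2.0]))
assert np.allclose(w, [0.0, 1.0]) and np.isclose(w.sum(), 1.0)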
|
ace378ed84c61e05e04fdad23e3d97127e63df3a
| 3,639,325
|
from typing import Collection
from typing import List
from typing import Sized
def render_list(something: Collection, threshold: int, tab: str) -> List[str]:
"""
Unpack a list or something similar
"""
i = 1
sub_storage = []
order = '{:0' + str(len(str(len(something)))) + 'd}'
for element in something:
if isinstance(element, Sized) and len(element) > threshold:
add = []
render(element, threshold, add, tab + '\t')
sub_storage.extend(add)
else:
sub_storage.append(f'{tab}{order.format(i)}| {element!r}')
i += 1
return sub_storage
|
a7eb47df956fc4404bae6e29e75b280cd2b70cba
| 3,639,326
|
from typing import Optional
from typing import List
from typing import Tuple
def combine_result(
intent_metrics: IntentMetrics,
entity_metrics: EntityMetrics,
response_selection_metrics: ResponseSelectionMetrics,
interpreter: Interpreter,
data: TrainingData,
intent_results: Optional[List[IntentEvaluationResult]] = None,
entity_results: Optional[List[EntityEvaluationResult]] = None,
response_selection_results: Optional[
List[ResponseSelectionEvaluationResult]
] = None,
) -> Tuple[IntentMetrics, EntityMetrics, ResponseSelectionMetrics]:
"""Collects intent, response selection and entity metrics for cross validation
folds.
If `intent_results`, `response_selection_results` or `entity_results` is provided
as a list, prediction results are also collected.
Args:
intent_metrics: intent metrics
entity_metrics: entity metrics
response_selection_metrics: response selection metrics
interpreter: the interpreter
data: training data
intent_results: intent evaluation results
entity_results: entity evaluation results
response_selection_results: response selection evaluation results
Returns: intent, entity, and response selection metrics
"""
(
intent_current_metrics,
entity_current_metrics,
response_selection_current_metrics,
current_intent_results,
current_entity_results,
current_response_selection_results,
) = compute_metrics(interpreter, data)
if intent_results is not None:
intent_results += current_intent_results
if entity_results is not None:
entity_results += current_entity_results
if response_selection_results is not None:
response_selection_results += current_response_selection_results
for k, v in intent_current_metrics.items():
intent_metrics[k] = v + intent_metrics[k]
for k, v in response_selection_current_metrics.items():
response_selection_metrics[k] = v + response_selection_metrics[k]
for extractor, extractor_metric in entity_current_metrics.items():
entity_metrics[extractor] = {
k: v + entity_metrics[extractor][k] for k, v in extractor_metric.items()
}
return intent_metrics, entity_metrics, response_selection_metrics
|
86942bbb30fe86fcd8e3453e7ac661b97832ec1a
| 3,639,327
|
import jobtracker
def get_fns_for_jobid(jobid):
"""Given a job ID number, return a list of that job's data files.
Input:
jobid: The ID number from the job-tracker DB to get files for.
Output:
fns: A list of data files associated with the job ID.
"""
query = "SELECT filename " \
"FROM files, job_files " \
"WHERE job_files.file_id=files.id " \
"AND job_files.job_id=%d" % jobid
rows = jobtracker.query(query)
fns = [str(row['filename']) for row in rows]
return fns
|
ab867ec7b86981bfd06caf219b77fbb9410277ad
| 3,639,328
|
def linear_schedule(initial_value: float):
"""
Linear learning rate schedule.
:param initial_value: Initial learning rate.
:return: schedule that computes
current learning rate depending on remaining progress
"""
def func(progress_remaining: float) -> float:
"""
Progress will decrease from 1 (beginning) to 0.
:param progress_remaining:
:return: current learning rate
"""
return progress_remaining * initial_value
return func
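# Quick self-check of the schedule's shape (pure Python, no RL library needed):
schedule = linear_schedule(3e-4)
assert schedule(1.0) == 3e-4    # start of training: full learning rate
assert schedule(0.5) == 1.5e-4  # halfway through: half the learning rate
# With Stable-Baselines3 (an assumed consumer of this callable signature) it
# would be passed as, e.g.:
# model = PPO("MlpPolicy", env, learning_rate=linear_schedule(3e-4))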
|
afb0c9f050081f7e84728051535a899d9ece43f3
| 3,639,329
|
def download(os_list, software_list, dst):
"""
按软件列表下载其他部分
"""
if os_list is None:
os_list = []
arch = get_arch(os_list)
LOG.info('software arch is {0}'.format(arch))
results = {'ok': [], 'failed': []}
no_mindspore_list = [software for software in software_list if "MindSpore" not in software]
for software in no_mindspore_list:
res = download_software(software, dst, arch)
if res:
results['ok'].append(software)
continue
results['failed'].append(software)
return results
|
9def81d5c1f127cab08add62a16df35c2a9dbc80
| 3,639,330
|
import hashlib
import partialhash
def get_hash_bin(shard, salt=b"", size=0, offset=0):
    """Get the hash of the shard.
    Args:
        shard: A file like object representing the shard.
        salt: Optional salt to add as a prefix before hashing.
    Returns: Binary digest of ripemd160(sha256(salt + shard)).
    """
shard.seek(0)
digest = partialhash.compute(shard, offset=offset, length=size, seed=salt,
hash_algorithm=hashlib.sha256)
shard.seek(0)
return ripemd160(digest).digest()
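# Hedged usage sketch (`partialhash` and the sibling `ripemd160` helper are
# assumed available, as the function body implies):
# from io import BytesIO
# digest = get_hash_bin(BytesIO(b"shard data"), salt=b"pepper")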
|
94c399d41b56598e4ecac3f0c2d917a226e9e9db
| 3,639,331
|
import numpy as np
def boltzmann_statistic(
properties: ArrayLike1D,
energies: ArrayLike1D,
temperature: float = 298.15,
statistic: str = "avg",
) -> float:
"""Compute Boltzmann statistic.
Args:
properties: Conformer properties
energies: Conformer energies (a.u.)
temperature: Temperature (K)
statistic: Statistic to compute: 'avg', 'var' or 'std'
Returns:
result: Boltzmann statistic
"""
properties = np.array(properties)
# Get conformer weights
weights = boltzmann_weights(energies, temperature)
# Compute Boltzmann weighted statistic
result: float
if statistic == "avg":
result = np.average(properties, weights=weights)
elif statistic == "var":
avg = np.average(properties, weights=weights)
result = np.sum(weights * (properties - avg) ** 2)
elif statistic == "std":
avg = np.average(properties, weights=weights)
var = np.sum(weights * (properties - avg) ** 2)
result = np.sqrt(var)
return result
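# Hedged usage sketch (`boltzmann_weights` is a sibling helper; energies in
# hartree, values purely illustrative):
# avg_prop = boltzmann_statistic([1.2, 0.8], [0.0, 0.002], statistic="avg")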
|
5c5ea2d9ff43e9e068856d73f1e6bdc1f53c42b0
| 3,639,332
|
def _check_n_pca_components(ica, _n_pca_comp, verbose=None):
"""Aux function"""
if isinstance(_n_pca_comp, float):
_n_pca_comp = ((ica.pca_explained_variance_ /
ica.pca_explained_variance_.sum()).cumsum()
<= _n_pca_comp).sum()
logger.info('Selected %i PCA components by explained '
'variance' % _n_pca_comp)
elif _n_pca_comp is None or _n_pca_comp < ica.n_components_:
_n_pca_comp = ica.n_components_
return _n_pca_comp
|
1295de84f6054cac3072e2ba861c291cf71fdb72
| 3,639,333
|
def model_fn():
    """
    Return an InceptionV3 model with the top layer removed and the broadcast
    ImageNet pre-trained weights applied.
    """
    model = InceptionV3(
        include_top=False,  # classification softmax layer removed
        weights='imagenet',  # weights pre-trained on ImageNet
        # input_shape=(100,100,3),  # 100x100 colour image (3 channels)
        pooling='max'  # use max pooling
    )
    model.set_weights(bc_model_weights.value)
    return model
|
3ee68e9874025d94cc1d73cf4857fecf6241e415
| 3,639,335
|
def find_correspondance_date(index, csv_file):
"""
The method returns the dates reported in the csv_file for the i-subject
:param index: index corresponding to the subject analysed
:param csv_file: csv file where all the information are listed
:return date
"""
return csv_file.EXAMDATE[index]
|
915b9a493247f04fc1f62e614bc26b6c342783c8
| 3,639,336
|
def get_config(object_config_id):
"""
Returns current and previous config
:param object_config_id:
:type object_config_id: int
:return: Current and previous config in dictionary format
:rtype: dict
"""
fields = ('config', 'attr', 'date', 'description')
try:
object_config = ObjectConfig.objects.get(id=object_config_id)
except ObjectConfig.DoesNotExist:
return None
config = {}
for name in ['current', 'previous']:
_id = getattr(object_config, name)
if _id:
config[name] = get_object(ConfigLog, _id, fields, ['date'])
else:
config[name] = None
return config
|
5eb31025494dbcf17890f3ed9e7165232db9e087
| 3,639,337
|
import unicodedata
def normalize_to_ascii(char):
"""Strip a character from its accent and encode it to ASCII"""
return unicodedata.normalize("NFKD", char).encode("ascii", "ignore").lower()
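# Example: accented characters collapse to their lowercase ASCII base letter.
assert normalize_to_ascii("é") == b"e"
assert normalize_to_ascii("Ç") == b"c"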
|
592e59ae10bb8f9a04dffc55bcc2a1a3cefb5e7e
| 3,639,338
|
from OpenSSL import crypto
def verify_certificate_chain(certificate, intermediates, trusted_certs, logger):
"""
:param certificate: cryptography.x509.Certificate
:param intermediates: list of cryptography.x509.Certificate
:param trusted_certs: list of cryptography.x509.Certificate
Verify that the certificate is valid, according to the list of intermediates and trusted_certs.
Uses legacy crypto.X509 functions as no current equivalent in https://cryptography.io/en/latest/
See:
https://gist.github.com/uilianries/0459f59287bd63e49b1b8ef03b30d421#file-cert-check-py
:return: bool
"""
try:
        # Create a certificate store and add your trusted certs
store = crypto.X509Store()
for tc in trusted_certs:
store.add_cert(crypto.X509.from_cryptography(tc))
# Create a certificate context using the store, to check any intermediate certificates
for i in intermediates:
logger.info('| verifying intermediate certificates')
i_X509 = crypto.X509.from_cryptography(i)
store_ctx = crypto.X509StoreContext(store, i_X509)
store_ctx.verify_certificate()
# no exception, so Intermediate verified - add the intermediate to the store
store.add_cert(i_X509)
# Validate certificate against (trusted + intermediate)
logger.info('| intermediates passed, verifying user certificate')
store_ctx = crypto.X509StoreContext(store, crypto.X509.from_cryptography(certificate))
# Verify the certificate, returns None if it can validate the certificate
store_ctx.verify_certificate()
logger.info('| user certificate passed')
return True
except crypto.X509StoreContextError as e:
logger.warning(e)
return False
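# Hedged usage sketch (certificates loaded with cryptography's x509 module;
# `pem_bytes`, `root_pem` and `log` are illustrative placeholders):
# from cryptography import x509
# cert = x509.load_pem_x509_certificate(pem_bytes)
# root = x509.load_pem_x509_certificate(root_pem)
# ok = verify_certificate_chain(cert, intermediates=[], trusted_certs=[root], logger=log)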
|
5d96fa38f22a74ae270af3ab35fc90274ed487e0
| 3,639,339
|
import json
def update_strip_chart_data(_n_intervals, acq_state, chart_data_json_str,
samples_to_display_val, active_channels):
"""
A callback function to update the chart data stored in the chartData HTML
div element. The chartData element is used to store the existing data
values, which allows sharing of data between callback functions. Global
variables cannot be used to share data between callbacks (see
https://dash.plot.ly/sharing-data-between-callbacks).
Args:
_n_intervals (int): Number of timer intervals - triggers the callback.
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data.
samples_to_display_val (float): The number of samples to be displayed.
active_channels ([int]): A list of integers corresponding to the user
selected active channel checkboxes.
Returns:
str: A string representation of a JSON object containing the updated
chart data.
"""
updated_chart_data = chart_data_json_str
samples_to_display = int(samples_to_display_val)
num_channels = len(active_channels)
if acq_state == 'running':
hat = globals()['_HAT']
if hat is not None:
chart_data = json.loads(chart_data_json_str)
# By specifying -1 for the samples_per_channel parameter, the
# timeout is ignored and all available data is read.
read_result = hat.a_in_scan_read(ALL_AVAILABLE, RETURN_IMMEDIATELY)
if ('hardware_overrun' not in chart_data.keys()
or not chart_data['hardware_overrun']):
chart_data['hardware_overrun'] = read_result.hardware_overrun
if ('buffer_overrun' not in chart_data.keys()
or not chart_data['buffer_overrun']):
chart_data['buffer_overrun'] = read_result.buffer_overrun
# Add the samples read to the chart_data object.
sample_count = add_samples_to_data(samples_to_display, num_channels,
chart_data, read_result)
# Update the total sample count.
chart_data['sample_count'] = sample_count
updated_chart_data = json.dumps(chart_data)
elif acq_state == 'configured':
# Clear the data in the strip chart when Configure is clicked.
updated_chart_data = init_chart_data(num_channels, samples_to_display)
return updated_chart_data
|
67902561bc4d0cec2a1ac2f8d385a2accf4c03e9
| 3,639,340
|
import uuid
def genuuid():
"""Generate a random UUID4 string."""
return str(uuid.uuid4())
|
c664a9bd45f0c00dedf196bb09a09c6cfaf0d54b
| 3,639,341
|
import numpy as np
def watsons_f(DI1, DI2):
"""
calculates Watson's F statistic (equation 11.16 in Essentials text book).
Parameters
_________
DI1 : nested array of [Dec,Inc] pairs
DI2 : nested array of [Dec,Inc] pairs
Returns
_______
F : Watson's F
Fcrit : critical value from F table
"""
# first calculate R for the combined data set, then R1 and R2 for each individually.
# create a new array from two smaller ones
DI = np.concatenate((DI1, DI2), axis=0)
    fpars = fisher_mean(DI)  # re-use our function from problem 1b
fpars1 = fisher_mean(DI1)
fpars2 = fisher_mean(DI2)
N = fpars['n']
R = fpars['r']
R1 = fpars1['r']
R2 = fpars2['r']
F = (N-2.)*((R1+R2-R)/(N-R1-R2))
Fcrit = fcalc(2, 2*(N-2))
return F, Fcrit
|
db1f6be50657f4721aac4f800b7896afcbd71db7
| 3,639,342
|
def encode(integer_symbol, bit_count):
""" Returns an updated version of the given symbol list with the given symbol encoded into binary.
- `symbol_list` - the list onto which to encode the value.
- `integer_symbol` - the integer value to be encoded.
- `bit_count` - the number of bits from the end of the symbol list to decode.
"""
assert type(integer_symbol) == int and integer_symbol >= 0, "The given symbol must be an integer greater than or equal to zero."
# Convert the symbol into a bit string.
bit_string = bin(integer_symbol)
# Strip off any '0b' prefix.
if bit_string.startswith('0b'):
bit_string = bit_string[2:]
# end if
# Convert the string into a list of integers.
bits = [int(bit) for bit in list(bit_string)]
# Check that the number of bits is not bigger than the given bit count.
bits_length = len(bits)
assert bit_count >= bits_length, \
"The given %d bits to encode with is not enough to encode %d bits." % \
(bit_count, bits_length)
# Calculate how many bits we need to pad the bit string with, if any, and pad with zeros.
    pad_list = [0 for i in range(0, bit_count - bits_length)]
# Return the newly created bit list, with the zero padding first.
symbol_list = pad_list + bits
return symbol_list
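# Example: 5 encoded into 8 bits is a zero-padded, big-endian bit list.
assert encode(5, 8) == [0, 0, 0, 0, 0, 1, 0, 1]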
|
fe8fb04245c053bb4387b0ac594a778df5bce22c
| 3,639,343
|
def superkick(update, context):
"""Superkick a member from all rooms by replying to one of their messages with the /superkick command."""
bot = context.bot
user_id = update.message.from_user.id
boot_id = update.message.reply_to_message.from_user.id
username = update.message.reply_to_message.from_user.name
admin = _admin(user_id)
if not admin:
return _for_admin_only_message(bot, user_id, username)
    # The three target groups differ only by config key, so handle them in one loop.
    for group_key in ("crab_wiv_a_plan", "tutorial", "video_stars"):
        chat_id = config["GROUPS"][group_key]
        if _in_group(context, user_id, chat_id):
            bot.kick_chat_member(chat_id=chat_id, user_id=boot_id)
            bot.restrict_chat_member(chat_id=chat_id, user_id=boot_id,
                                     can_send_messages=True,
                                     can_send_media_messages=True,
                                     can_add_web_page_previews=True,
                                     can_send_other_messages=True)
remove_member(boot_id)
the_message = '{} has been *SUPER KICKED* from Crab Wiv A Plan, Tutorial Group, and VideoStars.' \
.format(escape_markdown(username))
bot.send_message(chat_id=config["GROUPS"]["boot_channel"],
text=the_message,
parse_mode='MARKDOWN')
bot.delete_message(chat_id=update.message.chat_id,
message_id=update.message.message_id)
|
2a6550bb533a51cc8ebb79ca7f5cdbd214af4a5a
| 3,639,344
|
import typing
from datetime import datetime
import pendulum
from cryptography.fernet import Fernet
def encrypt_session(
    signer: Fernet,
    session_id: str,
    current_time: typing.Optional[typing.Union[int, datetime]] = None,
) -> str:
"""An utility for generating a token from the passed session id.
:param signer: an instance of a fernet object
:param session_id: a user session id
:param current_time: a datetime object or timestamp indicating the time of the session id encryption. By default, it is now
"""
if current_time is None:
current_time = pendulum.now()
if isinstance(current_time, datetime):
current_time = current_time.timestamp()
return signer.encrypt_at_time(session_id.encode("utf-8"), int(current_time)).decode(
"utf-8"
)
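# Hedged usage sketch (Fernet key generation per the cryptography docs;
# encrypt_at_time needs a reasonably recent cryptography release):
# signer = Fernet(Fernet.generate_key())
# token = encrypt_session(signer, "session-id-123")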
|
9d924dcbc0abdf8facb31e256c5c67ccca3850be
| 3,639,345
|
import numpy as np
def construct_chargelst(nsingle):
"""
Makes list of lists containing Lin indices of the states for given charge.
Parameters
----------
nsingle : int
Number of single particle states.
Returns
-------
chargelst : list of lists
chargelst[charge] gives a list of state indices for given charge,
chargelst[charge][ind] gives state index.
"""
nmany = np.power(2, nsingle)
chargelst = [[] for _ in range(nsingle+1)]
# Iterate over many-body states
for j1 in range(nmany):
state = integer_to_binarylist(j1, nsingle)
chargelst[sum(state)].append(j1)
return chargelst
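# Example (assumes the sibling `integer_to_binarylist` helper): for two
# single-particle states the four many-body states split by total charge as
# construct_chargelst(2)  ->  [[0], [1, 2], [3]]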
|
e94044566d0acc7106d34d142ed3579226706a65
| 3,639,346
|
import json
def parse(json_string):
"""Constructs the Protocol from the JSON text."""
    try:
        json_data = json.loads(json_string)
    except ValueError:
        raise ProtocolParseException('Error parsing JSON: %s' % json_string)
# construct the Avro Protocol object
return make_avpr_object(json_data)
|
f95854e8c0b8e49ec71e03ee8487f88f4687ebf0
| 3,639,347
|
def get_architecture(model_config: dict, feature_config: FeatureConfig, file_io):
"""
Return the architecture operation based on the model_config YAML specified
"""
architecture_key = model_config.get("architecture_key")
if architecture_key == ArchitectureKey.DNN:
return DNN(model_config, feature_config, file_io).get_architecture_op()
elif architecture_key == ArchitectureKey.LINEAR:
# Validate the model config
        num_dense_layers = len([layer for layer in model_config["layers"] if layer["type"] == "dense"])
if num_dense_layers == 0:
raise ValueError("No dense layers were specified in the ModelConfig")
elif num_dense_layers > 1:
raise ValueError("Linear model used with more than 1 dense layer")
else:
return DNN(model_config, feature_config, file_io).get_architecture_op()
elif architecture_key == ArchitectureKey.RNN:
raise NotImplementedError
else:
raise NotImplementedError
|
a7c58770a07c225ae79a03699639e19498d3a0c6
| 3,639,349
|
def get_properties_dict(serialized_file: str, sparql_file: str, repository: str, endpoint: str, endpoint_type: str,
limit: int = 1000) -> ResourceDictionary:
"""
Return a ResourceDictionary with the list of properties in the ontology
:param serialized_file: The file where the properties ResourceDictionary is serialized
:param sparql_file: The file containing the SPARQL query
:param repository: The repository containing the ontology
:param endpoint: The SPARQL endpoint
:param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
:param limit: The sparql query limit
:return: A ResourceDictionary with the list of properties in the ontology
"""
global_properties_dict = deserialize(serialized_file)
if global_properties_dict:
return global_properties_dict
global_properties_dict = ResourceDictionary()
global_properties_dict.add(RDF.type)
    with open(sparql_file) as sparql_fh:
        properties_sparql_query = sparql_fh.read()
properties_sparql_query_template = Template(properties_sparql_query + " limit $limit offset $offset ")
for rdf_property in get_sparql_results(properties_sparql_query_template, ["property"], endpoint, repository,
endpoint_type, limit):
global_properties_dict.add(rdf_property[0])
serialize(global_properties_dict, serialized_file)
return global_properties_dict
|
3a31bd8b23cb7a940c6386225dd39a302f3d3f3a
| 3,639,350
|
import pandas as pd
def get_duplicate_sample_ids(taxonomy_ids):
    """Get duplicate sample IDs from the taxonomy table.
    Some sample IDs are associated with more than one taxon, which would mean
    the same sample belongs to two different species. That is a data entry
    error and should be removed. Conversely, having more than one sample for
    a taxon is fine; it's just oversampling and will be handled later.
    """
taxonomy_ids['times'] = 0
errors = taxonomy_ids.groupby('sample_id').agg(
{'times': 'count', 'sci_name': ', '.join})
errors = errors.loc[errors.times > 1, :].drop(['times'], axis='columns')
sci_names = errors.sci_name.str.split(r'\s*[;,]\s*', expand=True)
id_cols = {i: f'sci_name_{i + 1}' for i in sci_names.columns}
sci_names = sci_names.rename(columns=id_cols)
errors = pd.concat([errors, sci_names], axis='columns').drop(
['sci_name'], axis=1)
return errors
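# Hedged usage sketch: `taxonomy_ids` is a DataFrame with at least the
# 'sample_id' and 'sci_name' columns (column names per the function body):
# dupes = get_duplicate_sample_ids(pd.DataFrame({
#     'sample_id': ['s1', 's1', 's2'],
#     'sci_name': ['Aus bus', 'Cus dus', 'Eus fus']}))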
|
c01315d6d51ec8e62a0f510944d724a18949aeb8
| 3,639,351
|
def get_settings_text(poll):
"""Compile the options text for this poll."""
text = []
locale = poll.user.locale
text.append(i18n.t('settings.poll_type',
locale=locale,
poll_type=translate_poll_type(poll.poll_type, locale)))
text.append(i18n.t('settings.language', locale=locale, language=poll.locale))
if poll.anonymous:
text.append(i18n.t('settings.anonymous', locale=locale))
else:
text.append(i18n.t('settings.not_anonymous', locale=locale))
if poll.due_date:
text.append(i18n.t('settings.due_date', locale=locale,
date=poll.get_formatted_due_date()))
else:
text.append(i18n.t('settings.no_due_date', locale=locale))
if poll.results_visible:
text.append(i18n.t('settings.results_visible', locale=locale))
else:
text.append(i18n.t('settings.results_not_visible', locale=locale))
text.append('')
if poll.allow_new_options:
text.append(i18n.t('settings.user_options', locale=locale))
else:
text.append(i18n.t('settings.no_user_options', locale=locale))
if poll.results_visible:
if poll.show_percentage:
text.append(i18n.t('settings.percentage', locale=locale))
else:
text.append(i18n.t('settings.no_percentage', locale=locale))
if poll.has_date_option():
if poll.european_date_format:
text.append(i18n.t('settings.euro_date_format', locale=locale))
else:
text.append(i18n.t('settings.us_date_format', locale=locale))
text.append('')
# Sorting of user names
if poll.poll_type == PollType.doodle.name:
        sorting_name = i18n.t('sorting.doodle_sorting', locale=locale)
text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))
elif not poll.anonymous:
sorting_name = i18n.t(f'sorting.{poll.user_sorting}', locale=locale)
text.append(i18n.t('settings.user_sorting', locale=locale, name=sorting_name))
sorting_name = i18n.t(f'sorting.{poll.option_sorting}', locale=locale)
text.append(i18n.t('settings.option_sorting', locale=locale, name=sorting_name))
return '\n'.join(text)
|
24ef467070324dac6a8c698b791a1fe577a5d928
| 3,639,352
|
import functools
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
|
2264ca5978485d8fc13377d17eb84ee522a040b9
| 3,639,354
|
def create_values_key(key):
"""Creates secondary key representing sparse values associated with key."""
return '_'.join([key, VALUES_SUFFIX])
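# Hedged example (VALUES_SUFFIX is a module constant not shown here; assuming
# it equals "values"):
# create_values_key("weights")  ->  "weights_values"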
|
e8a70bc4ef84a7a62a9d8b8d915b9ddbc0990429
| 3,639,355
|
def make_mask(variable, **flags):
"""
Return a mask array, based on provided flags
For example:
make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True)
OR
make_mask(pqa, **GOOD_PIXEL_FLAGS)
where GOOD_PIXEL_FLAGS is a dict of flag_name to True/False
    :param variable:
    :type variable: xarray.Dataset or xarray.DataArray
    :param flags: flag_name=True/False keyword arguments selecting flag states
    :return: boolean mask with the same shape as ``variable``
    """
flags_def = get_flags_def(variable)
mask, mask_value = create_mask_value(flags_def, **flags)
return variable & mask == mask_value
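# Hedged usage sketch building on the docstring example: apply the resulting
# boolean mask with xarray's .where (dataset and flag names are illustrative):
# good = make_mask(pqa, cloud_acca=False, cloud_fmask=False, land_obs=True)
# clean_data = data.where(good)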
|
fcdd7247359b5127d14a906298e20a05fd63b108
| 3,639,356
|
def _normalize_block_comments(content: str) -> str:
"""Add // to the beginning of all lines inside a /* */ block"""
comment_partitions = _partition_block_comments(content)
normalized_partitions = []
for partition in comment_partitions:
if isinstance(partition, Comment):
comment = partition
normalized_comment_lines = []
comment_lines = comment.splitlines(keepends=True)
normalized_comment_lines.append(comment_lines[0])
for line in comment_lines[1:]:
if line.lstrip().startswith("//"):
normalized_line = line
else:
normalized_line = f"// {line}"
normalized_comment_lines.append(normalized_line)
normalized_comment = f'/*{"".join(normalized_comment_lines)}*/'
normalized_partitions.append(normalized_comment)
else:
normalized_partitions.append(partition)
normalized_content = "".join(normalized_partitions)
return normalized_content
|
76c2c1d0b80cf40f647033aa8745058f1546076e
| 3,639,357
|
from datetime import datetime, timedelta
def check_holidays(date_start, modified_end_date, holidays):
    """
    Check whether any holidays fall within the vacation dates.
    If they do, the vacation is extended by one day per holiday;
    otherwise the end date stays unchanged.
    """
    date_1 = datetime.strptime(date_start, '%d.%m.%Y')  # start date
    # end date used by the check loop (shifted +1 day for every holiday found)
    date_2 = datetime.strptime(modified_end_date, '%d.%m.%Y')
    # baseline end date used to compute the final date after adding holidays
    date_3 = datetime.strptime(modified_end_date, '%d.%m.%Y')
    # counter of holidays falling within the vacation
    x = 0
    # loop over the supplied holidays list
for i in holidays:
if date_1 <= datetime.strptime(i, '%d.%m.%Y') <= date_2:
print(i)
x += 1
date_2 = date_2 + timedelta(days=1)
print(x)
# adding counter to first end date
date_end = date_3 + timedelta(days=x)
date_end = datetime.strftime(date_end, '%d.%m.%Y')
return date_end
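# Example: one holiday inside the range pushes the end date out by a day:
# check_holidays('01.06.2020', '05.06.2020', ['03.06.2020'])  ->  '06.06.2020'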
|
c2b8145f9963cd2679e238c2c378535eea2e08db
| 3,639,358
|
from typing import Optional
from pathlib import Path
import platform
def get_local_ffmpeg() -> Optional[Path]:
"""
Get local ffmpeg binary path.
### Returns
- Path to ffmpeg binary or None if not found.
"""
    # Parenthesise the conditional so it selects only the extension; without
    # parentheses the whole filename collapses to "" on non-Windows systems.
    ffmpeg_path = Path(
        get_spotdl_path(),
        "ffmpeg" + (".exe" if platform.system() == "Windows" else ""),
    )
if ffmpeg_path.is_file():
return ffmpeg_path
return None
|
2495a1153da32f3ffb21075172cd0fb82b7809ea
| 3,639,360
|
def _water_vapor_pressure_difference(temp, wet_bulb_temp, vap_press, psych_const):
"""
    Evaluate the psychrometric formula
    e - (e_w(T_w) - gamma * (T_a - T_w)),
    where e is the vapor pressure and e_w(T_w) is the saturation vapor
    pressure at the wet-bulb temperature.
Parameters
----------
temp : numeric
Air temperature (K).
wet_bulb_temp : numeric
Wet-bulb temperature (K).
vap_press : numeric
Vapor pressure (Pa).
psych_const : numeric
Psychrometric constant (Pa K-1).
Returns
-------
wat_vap_press_diff : numeric
Water vapor pressure difference (Pa).
"""
sat_vap_press_wet_bulb = saturation_vapor_pressure(wet_bulb_temp)
return vap_press - (sat_vap_press_wet_bulb - psych_const * (temp - wet_bulb_temp))
|
cee814a44ae1736dc35f08984cdb15fe94576716
| 3,639,362
|
from functools import wraps
def _service_description_required(func):
"""
Decorator for checking whether the service description is available on a device's service.
"""
@wraps(func)
def wrapper(service, *args, **kwargs):
if service.description is None:
raise exceptions.NotRetrievedError('No service description retrieved for this service.')
elif service.description == exceptions.NotAvailableError:
return
return func(service, *args, **kwargs)
return wrapper
|
27b962616026ad3987d2c214138d903971e2461c
| 3,639,363
|
import numpy as N
def vector(*args):
"""
A single vector in any coordinate basis,
as a numpy array.
"""
return N.array(args)
|
41da98ad36bff55fc4b71ce6b4e604262b2ecd1a
| 3,639,364
|
def arcmin_to_deg(arcmin: float) -> float:
""" Convert arcmin to degree """
return arcmin / 60
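# Example: 90 arcminutes is 1.5 degrees.
assert arcmin_to_deg(90) == 1.5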
|
9ef01181a319c0c48542ac57602bd7c17a7c1ced
| 3,639,365
|
import tensorflow as tf  # TF1-style API (tf.to_float was removed in TF2)
def soft_embedding_lookup(embedding, soft_ids):
"""Transforms soft ids (e.g., probability distribution over ids) into
embeddings, by mixing the embedding vectors with the soft weights.
Args:
embedding: A Tensor of shape `[num_classes] + embedding-dim` containing
the embedding vectors. Embedding can have dimensionality > 1, i.e.,
:attr:`embedding` can be of shape
`[num_classes, emb_dim_1, emb_dim_2, ...]`
soft_ids: A Tensor of weights (probabilities) used to mix the
embedding vectors.
Returns:
A Tensor of shape `shape(soft_ids)[:-1] + shape(embedding)[1:]`. For
example, if `shape(soft_ids) = [batch_size, max_time, vocab_size]`
and `shape(embedding) = [vocab_size, emb_dim]`, then the return tensor
has shape `[batch_size, max_time, emb_dim]`.
Example::
decoder_outputs, ... = decoder(...)
soft_seq_emb = soft_embedding_lookup(
embedding, tf.nn.softmax(decoder_outputs.logits))
"""
return tf.tensordot(tf.to_float(soft_ids), embedding, [-1, 0])
|
4b831b8f23a226aac74c0bb3919e3c27bb57dc60
| 3,639,366
|
def param_11(i):
"""Returns parametrized Exp11Gate."""
return Exp11Gate(half_turns=i)
|
5458c8a4e992bd38dbb114e9ae4c4bac8a86fc75
| 3,639,367
|
def resolve_link(db: Redis[bytes], address: hash_t) -> hash_t:
"""Resolve any link recursively."""
key = join(ARTEFACTS, address, "links_to")
link = db.get(key)
if link is None:
return address
else:
out = hash_t(link.decode())
return resolve_link(db, out)
|
b8087b2d015fc4b8515c35e437e609a935ccfcb2
| 3,639,368
|