| content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
|---|---|---|
def dwa_control(x, config, goal, ob):
"""
    Dynamic Window Approach control: compute the admissible dynamic window
    for the current state, then search it for the best control input and
    predicted trajectory.
"""
dw = calc_dynamic_window(x, config)
u, trajectory = calc_final_input(x, dw, config, goal, ob)
return u, trajectory
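# Usage sketch (hypothetical): the state layout below follows the common
# PythonRobotics DWA convention and is an assumption, not confirmed by this
# snippet alone; `config` is the planner configuration object expected by
# calc_dynamic_window.
#
#   x = np.array([0.0, 0.0, math.pi / 8.0, 0.0, 0.0])  # [x, y, yaw, v, omega]
#   goal = np.array([10.0, 10.0])
#   ob = np.array([[2.0, 4.0], [4.0, 6.0]])             # obstacle positions
#   u, predicted_trajectory = dwa_control(x, config, goal, ob)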
|
788d7c5427017436766d86cf0408eeabc4361d7e
| 3,647,534
|
def decompress_bytes(inp_bytes: bytes, verbose=False) -> bytearray:
"""
Main function to decompress input bytes by extracting the Huffman map
and using the map to replace the encoded sequences with the original
characters.
    :param inp_bytes: Input data to be decompressed
:param verbose: set to True for printing console outputs
:return: decompressed bytearray data
"""
huff_map: HuffCode
rem: int
huff_map, rem = extract_huff_map(inp_bytes, verbose=verbose)
inp_bytes = inp_bytes[:-rem]
rev_seq: str = reverse_final_sequence(inp_bytes, verbose=verbose)
res: bytearray = reverse_huff_sequence(huff_map, rev_seq, verbose=verbose)
return res
|
9d3287ff1e481f04edcbe9eb8e06989d5ac83bd6
| 3,647,536
|
def filter_nans(data,
threshold = 3,
threshold_type = "data"):
"""
=================================================================================================
filter_nans(data, threshold, threshold_type)
This function is meant to filter out the nan values from a list, based on the input arguments.
=================================================================================================
Arguments:
data -> A list (or iterable) of data points. The points are assumed to be numbers.
    threshold      -> An integer threshold applied according to threshold_type.
    threshold_type -> How the threshold is applied: "data"/"on_data"/"on data"
                      keeps the result only if at least <threshold> non-NaN
                      points remain; "nan"/"on_nan"/"on nan" keeps it only if
                      at most <threshold> NaNs were removed.
=================================================================================================
Returns: The filtered list, or an empty list if the threshold requirements were not met.
=================================================================================================
"""
# Make sure the user gave a valid thresholding option
assert threshold_type.lower() in ["data",
"on_data",
"on data",
"nan",
"on_nan",
"on nan"], "Threshold is either relative to NaN or data."
    assert isinstance(data, list), "The data should be in a list"
# Filter NaNs, as they do not equal themselves
filtered = [val for val in data if val == val]
# Keep data if there are at least <threshold> data points
if threshold_type.lower() in ["data", "on_data", "on data"]:
if len(filtered) >= threshold:
return filtered
else:
return []
# Keep data if there are no more than <threshold> nans
elif threshold_type.lower() in ["nan", "on_nan", "on nan"]:
if len(data) - len(filtered) <= threshold:
return filtered
else:
return []
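# Example (follows directly from the logic above): NaN != NaN, so the
# comprehension drops NaNs; the threshold then decides whether to keep the
# filtered list.
#
#   >>> nan = float("nan")
#   >>> filter_nans([1, nan, 2, nan, 3], threshold=3, threshold_type="data")
#   [1, 2, 3]
#   >>> filter_nans([1, nan, 2, nan, 3], threshold=1, threshold_type="nan")
#   []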
|
fe84ae2d638102e05db68f0c0062ee036be1a63b
| 3,647,537
|
from copy import deepcopy
def edit_seq2seq_config(config, frameworks=FULL_FRAMEWORKS, no_attn=False):
"""Rotate frameworks and optionally remove attention."""
configs = []
for fw in frameworks:
c = deepcopy(config)
c['backend'] = fw
configs.append(c)
if not no_attn:
new_configs = []
# Run the non attention version
        for cfg in configs:
            c = deepcopy(cfg)
            c['model']['model_type'] = 'default'
            new_configs.append(c)
            new_configs.append(cfg)
configs = new_configs
return configs
|
bca93003cf67cc1c0ec14ba1dfa83664b10191fb
| 3,647,538
|
from typing import Optional
def get_bioportal_prefix(prefix: str) -> Optional[str]:
"""Get the Bioportal prefix if available."""
return _get_mapped_prefix(prefix, "bioportal")
|
f68ec16b8de886ab76319b06d4cf68c14a90fc53
| 3,647,539
|
def _obtain_rapt(request, access_token, requested_scopes):
"""Given an http request method and reauth access token, get rapt token.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
access_token (str): reauth access token
requested_scopes (Sequence[str]): scopes required by the client application
Returns:
str: The rapt token.
Raises:
google.auth.exceptions.ReauthError: if reauth failed
"""
msg = _get_challenges(
request,
list(challenges.AVAILABLE_CHALLENGES.keys()),
access_token,
requested_scopes,
)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
if not (
msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
):
raise exceptions.ReauthFailError(
"Reauthentication challenge failed due to API error: {}".format(
msg["status"]
)
)
if not is_interactive():
raise exceptions.ReauthFailError(
"Reauthentication challenge could not be answered because you are not"
" in an interactive session."
)
msg = _run_next_challenge(msg, request, access_token)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
# If we got here it means we didn't get authenticated.
raise exceptions.ReauthFailError("Failed to obtain rapt token.")
|
8c430df5c4198af8d044bd3151cdb7af605c14b1
| 3,647,540
|
def argunique(items, key=None):
"""
Returns indices corresponding to the first instance of each unique item.
Args:
items (Sequence[VT]): indexable collection of items
key (Callable[[VT], Any], default=None): custom normalization function.
If specified returns items where ``key(item)`` is unique.
Returns:
Iterator[int] : indices of the unique items
Example:
>>> import ubelt as ub
>>> items = [0, 2, 5, 1, 1, 0, 2, 4]
>>> indices = list(ub.argunique(items))
>>> assert indices == [0, 1, 2, 3, 7]
>>> indices = list(ub.argunique(items, key=lambda x: x % 2 == 0))
>>> assert indices == [0, 2]
"""
if key is None:
return unique(range(len(items)), key=lambda i: items[i])
else:
return unique(range(len(items)), key=lambda i: key(items[i]))
|
fd7af970578aac1a13a3123f13aac9daef1a4b7a
| 3,647,541
|
import numpy as np
def promote_cvals(*vals):
"""
Promote Python values into the most general dshape containing
all of them. Only defined over simple CType instances.
    >>> promote_cvals(1, 2.)
    dshape("float64")
    >>> promote_cvals(1, 2, 3j)
    dshape("complex128")
"""
promoted = np.result_type(*vals)
datashape = CType.from_dtype(promoted)
return datashape
|
3a928ca061bdc8fedf1cb6e125994c4b7167e0c7
| 3,647,542
|
def load_directory_metadata(directory_path, return_copy=True):
"""
Get stored metadata for files in path. This currently only stores bookmarks.
If no metadata is available, return an empty dictionary.
    The metadata lives in a hidden file in the directory which stores metadata
    for all files in the directory, as well as the directory itself. This has a bunch of
advantages over putting the data in each file:
- Every file format has its own way of storing metadata, and there are no
robust libraries that handle all of them.
- We don't have to modify the user's files, so there's no chance of us screwing
up and causing data loss.
- Opening each file during a refresh is extremely slow. It's much faster to
have a single file that we only read once per directory scan.
- We can use Windows Search to search this data if we format it properly. Use
a file extension that it indexes by default (we use .txt), and we can insert
keywords in the file that we can search for. Windows Search will index metadata
for some file types, but it's hit-or-miss (it handles JPEGs much better than PNGs).
"""
with _metadata_lock:
return _load_directory_metadata_locked(directory_path, return_copy=return_copy)
|
4033c1fae5c5330ef1254a13c97f33af43e39984
| 3,647,543
|
def _traverse_tree_and_group_all_objects_by_oclass(root_obj, result=None):
"""Traverses the tree once and groups all objects by oclass
    :param root_obj: The root object where to start the traversal
    :type root_obj: CUDS
:param result: The current results of the recursion, defaults to None
:type result: dict, optional
:return: All CUDS objects in the tree, grouped by oclass.
:rtype: dict
"""
if result is None:
result = {str(root_obj.oclass): [root_obj]}
for neighbour in root_obj.iter():
        if str(neighbour.oclass) not in result:
result[str(neighbour.oclass)] = [neighbour]
else:
result[str(neighbour.oclass)].append(neighbour)
_traverse_tree_and_group_all_objects_by_oclass(neighbour, result)
return result
|
3ae139313ea7b5e92f0d9231a4e64efc87acc5ac
| 3,647,544
|
def check_measurement(m_info, filters):
"""
Determine whether a given measurement should be included based on the
filters.
Inputs:
m_info - A dictionary containing the configuration parameters for an
individual measurement.
filters - A dictionary containing a set of configuration parameter
values that should be included
Output:
include - Boolean indicating whether to include the given measurement
"""
include = True
    for filter_field, filter_values in filters.items():
        try:
            iter(filter_values)
        except TypeError:
            filter_values = [filter_values]
if not m_info[filter_field] in filter_values:
include = False
return include
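# Example: a measurement is kept only if every filter field matches one of
# the allowed values (scalar filter values are wrapped into one-element lists).
#
#   >>> m_info = {"sensor": "A", "rate": 10}
#   >>> check_measurement(m_info, {"sensor": ["A", "B"]})
#   True
#   >>> check_measurement(m_info, {"sensor": ["A", "B"], "rate": 20})
#   False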
|
374be08c315a63d09faadc9c963a49a89b04b3ed
| 3,647,545
|
def audiosegment2wav(data: AudioSegment):
"""
    Convert a pydub.AudioSegment into a waveform (wav) signal.
    :param data: input audio segment
    :return: numpy waveform normalized by the int16 maximum
"""
wav = np.array(data.get_array_of_samples()) / _int16_max
return wav
|
44f75bf26ae0f3e11c3d9480aee38c2ad943ae86
| 3,647,546
|
def embargo(cand_times, test_times, embargo_table):
"""
"Embargo" observations from the training set.
Args:
cand_times(Series): times of candidates to be the "embargoed set"
index: t0(start time)
value: t1(end time)
test_times(Series): times of the test set
index: t0(start time)
value: t1(end time)
embargo_table(Series): embargo times table returned by get_embargo_table()
Returns:
embargoed_times(Series): times of embargoed training set
index: t0(start time)
value: t1(end time)
"""
first_test_start = test_times.index[0]
final_test_start = test_times.index[-1]
final_embargo_start = embargo_table[final_test_start] # end time of the embargo
to_embargo_idx = cand_times.loc[first_test_start:final_embargo_start].index
embargoed_times = cand_times.drop(to_embargo_idx)
return embargoed_times
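# Minimal sketch (assumes pandas; the embargo_table is built by hand here
# instead of by get_embargo_table, purely for illustration):
#
#   import pandas as pd
#   idx = pd.date_range("2021-01-01", periods=6, freq="D")
#   cand_times = pd.Series(idx + pd.Timedelta(days=1), index=idx)
#   test_times = cand_times.iloc[2:4]
#   embargo_table = pd.Series(idx + pd.Timedelta(days=2), index=idx)
#   embargoed = embargo(cand_times, test_times, embargo_table)
#   # candidates starting between the first test start and the end of the
#   # final embargo window are dropped from the training set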
|
6fb97816c32fc73661905af27613bef0c6ac0726
| 3,647,547
|
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the WiZ Light platform from config_flow."""
# Assign configuration variables.
wiz_data = hass.data[DOMAIN][entry.entry_id]
wizbulb = WizBulbEntity(wiz_data.bulb, entry.data.get(CONF_NAME), wiz_data.scenes)
# Add devices with defined name
async_add_entities([wizbulb], update_before_add=True)
return True
|
c65665220f81a5c918cf8eac7839159b4296a968
| 3,647,548
|
def check_account():
"""
A check that checks if the user has an account and if not creates one for them.
"""
async def check(ctx) -> bool:
conn = get_db()
cur = conn.cursor()
cur.execute("SELECT * FROM economy WHERE user_id = ?", (ctx.author.id,))
if cur.fetchone() is None:
cur.execute("INSERT INTO economy (user_id) VALUES (?)", (ctx.author.id,))
cur.execute("INSERT INTO cooldowns (user_id) VALUES (?)", (ctx.author.id,))
conn.commit()
cur.close()
conn.close()
return True
return check
|
205e39405eb52b57f743dfabca11c04cf11f0f34
| 3,647,550
|
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
|
0c70aac1ffe03eea62d581a6a4ab6b84495af079
| 3,647,551
|
def init_group_prams(net):
"""Initialize group_prams."""
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': 0.0001},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
return group_params
|
be078603c4ae42163f66668dcc16a0a77d899805
| 3,647,553
|
def nni_differences_parameters(nni=None, rpeaks=None):
"""Computes basic statistical parameters from a series of successive NN interval differences (mean, min, max, standard deviation).
Parameters
----------
nni : array
NN intervals in [ms] or [s].
rpeaks : array
R-peak times in [ms] or [s].
Returns (biosppy.utils.ReturnTuple Object)
------------------------------------------
[key : format]
Description.
nni_diff_mean: float
Mean NN interval difference [ms].
nni_diff_min : float
Minimum NN interval difference [ms].
nni_diff_max : float
Maximum NN interval difference [ms].
Notes
-----
.. Only one type of input data is required.
.. If both 'nni' and 'rpeaks' are provided, 'nni' will be chosen over the 'rpeaks'
.. NN and R-peak series provided in [s] format will be converted to [ms] format.
"""
# Check input
nn = tools.check_input(nni, rpeaks)
# Get NN interval differences
nnd = tools.nni_diff(nn)
# output
args = (float(nnd.mean()), int(nnd.min()), int(nnd.max()), )
names = ('nni_diff_mean', 'nni_diff_min', 'nni_diff_max', )
return utils.ReturnTuple(args, names)
|
aadea3b440fe4ac3c06cbd88cde69e11566e861f
| 3,647,554
|
def contextualize_model(model, cell_line, genes):
"""Contextualize model at the level of a PySB model."""
# Here we just make a PysbAssembler to be able
# to apply set_context on the model being passed in
model.name = cell_line
cell_line_ccle = cell_line + '_SKIN'
pa = PysbAssembler()
pa.model = model
pa.set_context(cell_line_ccle)
# Set initial conditions for missense mutations
variants = read_ccle_variants(genes)
mutations = variants['missense'][cell_line_ccle]
for gene, mut_list in mutations.items():
for fres, loc, tres in mut_list:
site_name = fres + loc
for ic in model.initial_conditions:
if ic[0].monomer_patterns[0].monomer.name == gene:
sc = ic[0].monomer_patterns[0].site_conditions
if site_name in sc:
sc[site_name] = tres
return pa.model
|
7f0018b0e1308a354529893fcd8ac54bb9fa7642
| 3,647,555
|
def _quaternionInverse(quat):
""" Inverses a list of quaternions
"""
quat_ = np.empty((quat.shape[0],4))
# For every quaternion
for i in range(quat.shape[0]):
mag = quat[i,0]**2 + quat[i,1]**2 + quat[i,2]**2 + quat[i,3]**2
quat_[i,0] = -quat[i,0]/mag
quat_[i,1] = -quat[i,1]/mag
quat_[i,2] = -quat[i,2]/mag
quat_[i,3] = quat[i,3]/mag
return quat_
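# Quick check: for a unit quaternion the inverse equals the conjugate.
#
#   q = np.array([[0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)]])  # 90 deg about z
#   _quaternionInverse(q)   # ~ [[-0., -0., -0.7071, 0.7071]]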
|
a70868d3b38fe087c83a52c1a7cabc32f05310dc
| 3,647,556
|
def load_dataset(files: list[str]) -> tuple[list[int], list[list[list[int]]]]:
"""load the images and labels of the test dataset
Args:
files (list[str]): list of files path for images and label dataset
Returns:
        tuple[list[int], list[list[list[int]]]]: list of labels and list of int matrices
"""
print("loading the dataset...")
with open(files[0], "rb") as image_file:
        magic_number = int.from_bytes(image_file.read(4), 'big', signed=True)
number_of_images = int.from_bytes(image_file.read(4), 'big', signed=True)
rows = int.from_bytes(image_file.read(4), 'big', signed=True)
cols = int.from_bytes(image_file.read(4), 'big', signed=True)
images = []
for _ in range(number_of_images):
matrix = []
for _ in range(rows):
row = []
for _ in range(cols):
row.append(int.from_bytes(image_file.read(1), 'big', signed=False))
matrix.append(row)
images.append(matrix)
with open(files[1], "rb") as label_file:
        magic_number = int.from_bytes(label_file.read(4), 'big', signed=True)
number_of_labels = int.from_bytes(label_file.read(4), 'big', signed=True)
labels = []
for _ in range(number_of_labels):
labels.append(int.from_bytes(label_file.read(1), 'big', signed=False))
return labels, images
|
e9635b8b9a4f92d96df8e0dea97a569a1b49b02d
| 3,647,557
|
def get_minion_node_ips(boot_conf, hb_conf):
"""
    Returns a list of IPs for all minion nodes
:param boot_conf: the snaps-boot configuration dict
:param hb_conf: the adrenaline configuration dict
:return: a list of IP addresses
"""
return __get_node_ips(boot_conf, hb_conf, 'minions')
|
c36ccc30043d2bb7a43314f6665b35ae9e1c47f4
| 3,647,558
|
def _normalize_sql(sql, maxlen=150):
"""Collapse whitespace and middle-truncate if needed."""
out = ' '.join(sql.split())
if len(out) > maxlen:
i = int(maxlen / 2 - 4)
        out = out[:i] + ' . . . ' + out[-i:]
return out
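# Example: whitespace collapses first; statements longer than maxlen are
# middle-truncated with " . . . ", keeping roughly the first and last
# maxlen/2 characters.
#
#   >>> _normalize_sql("SELECT  *\n  FROM   t\n  WHERE x = 1")
#   'SELECT * FROM t WHERE x = 1'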
|
f85efb0c367b448d2e363d9c1f8bf62a2bdb600e
| 3,647,559
|
from typing import Dict
def utt_non_punct_dialog(dialog: Dict):
"""
Used by: book_skill
"""
dialog = utils.get_last_n_turns(dialog)
dialog = utils.remove_clarification_turns_from_dialog(dialog)
return [{"dialogs": [dialog]}]
|
6ef4bf4fee0d8a4bba9fe140e476682e84064060
| 3,647,560
|
def griddata_easy(xx, yy, data, xi=None, yi=None, dx=None, dy=None, nx=10, ny=10, method='nearest', fill_value=None):
"""
    Generate gridded data from scattered data z = f(x, y)
    ... Wrapper of scipy.interpolate.griddata
Parameters
----------
xx: nd array-like
x-coordinate of scattered data
yy: nd array-like
y-coordinate of scattered data
data: nd array-like
values of scattered data
xi: 1d array
x-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, xi = np.arange(xmin, xmax, dx)
yi: 1d array
y-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, yi = np.arange(ymin, ymax, dy)
    dx: float
        spacing of 'xi' if 'xi' is not given
    dy: float
        spacing of 'yi' if 'yi' is not given
    nx: int
        if 'dx' were not given, dx is set as (xmax-xmin)/nx
    ny: int
        if 'dy' were not given, dy is set as (ymax-ymin)/ny
    method: method of 2D interpolation
        ... Options: 'nearest', 'linear', 'cubic'
    fill_value: value used to fill in for requested points outside the convex
        hull of the input points (passed through to scipy's griddata)
Returns
-------
xxi: 2d array
x-coordinate of the grid
    yyi: 2d array
        y-coordinate of the grid
data_i: 2d array
values on the grid
"""
xx, yy, data = np.asarray(xx), np.asarray(yy), np.asarray(data)
if not xx.shape == yy.shape == data.shape:
print('x.shape, y.shape, and data.shape must match. ', xx.shape, yy.shape, data.shape)
raise ValueError('shapes of x, y, and data do not match.')
x, y, data1d = xx.flatten(), yy.flatten(), data.flatten()
if xi is None:
xmin, xmax = np.nanmin(x), np.nanmax(x)
if dx is None:
dx = (xmax - xmin) / nx
xi = np.arange(xmin, xmax, dx)
if yi is None:
ymin, ymax = np.nanmin(y), np.nanmax(y)
if dy is None:
dy = (ymax - ymin) / ny
yi = np.arange(ymin, ymax, dy)
xxi, yyi = np.meshgrid(xi, yi)
# interpolate
data_i = griddata((x, y), data1d, (xxi, yyi), method=method, fill_value=fill_value)
return xxi, yyi, data_i
|
77c5c92e5176c62252f7c6814e3483d8a1323925
| 3,647,561
|
def emit_cover(ctx, go_toolchain,
source = None,
mode = None,
importpath = ""):
"""See go/toolchains.rst#cover for full documentation."""
if source == None: fail("source is a required parameter")
if mode == None: fail("mode is a required parameter")
if not importpath: fail("importpath is a required parameter")
stdlib = go_toolchain.stdlib.get(ctx, go_toolchain, mode)
covered = []
cover_vars = []
for s in source.entries:
if not s.want_coverage:
covered.append(s)
continue
outputs = []
for src in s.srcs:
if not src.basename.endswith(".go"):
outputs.append(src)
continue
cover_var = "Cover_" + src.basename[:-3].replace("-", "_").replace(".", "_")
cover_vars.append("{}={}={}".format(cover_var, src.short_path, importpath))
out = declare_file(ctx, path=cover_var, ext='.cover.go')
outputs.append(out)
args = ctx.actions.args()
add_go_env(args, stdlib, mode)
args.add(["--", "--mode=set", "-var=%s" % cover_var, "-o", out, src])
ctx.actions.run(
inputs = [src] + stdlib.files,
outputs = [out],
mnemonic = "GoCover",
executable = go_toolchain.tools.cover,
arguments = [args],
)
members = structs.to_dict(s)
members["srcs"] = outputs
covered.append(GoSource(**members))
return GoSourceList(entries=covered), cover_vars
|
d390f534e723a893ca5e8b23a90ae4008abf79fe
| 3,647,562
|
def shortdate(date=None):
"""turn (timestamp, tzoff) tuple into iso 8631 date."""
return datestr(date, format='%Y-%m-%d')
|
9478c96e8abd95a8cc5822b111b139572693ac8b
| 3,647,563
|
from datetime import datetime, date, time
import numpy
def default_fram( object_to_serialize):
"""
Python json api custom serializer function for FRAM Warehouse API
per:'Specializing JSON object encoding', https://simplejson.readthedocs.org
>>> import simplejson as json
>>> json.dumps({'Without':[1,'a',datetime(1999, 1, 1),'Serializer']})
Traceback (most recent call last):
...
TypeError: datetime.datetime(1999, 1, 1, 0, 0) is not JSON serializable
>>> dict2 = {'With':[1,'a',datetime(1999, 1, 1),'Serializer']}
>>> json.dumps( dict2, default=default_fram)
'{"With": [1, "a", "1999-01-01T00:00:00Z", "Serializer"]}'
>>> dict3 = {'With':[1,'a',date(1999, 1, 1),'Serializer']}
>>> json.dumps( dict3, default=default_fram)
'{"With": [1, "a", "1999-01-01", "Serializer"]}'
>>> dict4 = {'With':[1,'a',time(4, 5, 6),'Serializer']}
>>> json.dumps( dict4, default=default_fram)
'{"With": [1, "a", "1970-01-01T04:05:06Z", "Serializer"]}'
>>> numpy_64bit_int = {'With':[1,numpy.int64(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_32bit_int = {'With':[1,numpy.int32(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_16bit_int = {'With':[1,numpy.int16(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
"""
#Bake datetime objects into Strings
if isinstance( object_to_serialize, datetime):
if object_to_serialize.utcoffset() is None:
#Append 'Z', to conform to ISO8601 date spec
return object_to_serialize.isoformat()+'Z'
#Else, TZ offset present. TZ info will be automatically included per
# docs.python.org/3/library/datetime.html#datetime.datetime.isoformat
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, date):
# No Timezone info available,
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, time):
#No date available.Prefix:'1970-01-01T',to conform to ISO8601 date spec
isoformat = '1970-01-01T'+object_to_serialize.isoformat()
if object_to_serialize.utcoffset() is None:
# No Timezone info available,
# Append 'Z',to conform to ISO8601 date spec
return isoformat+'Z'
#else, TZ offset has already been added to string.
return isoformat
if isinstance(object_to_serialize, numpy.integer):
return int(object_to_serialize) #per Python issue24313, no support for numpy Ints
    #Else, wasn't a datetime/date/time and we don't handle anything else, so:
raise TypeError(repr(object_to_serialize) + " is not JSON serializable")
|
bb345b01b7ba86e2e47515addda854d16983f036
| 3,647,564
|
from timeit import timeit
def _benchmark_grep(filename, pattern):
"""Benchmarks grep.
Args:
- filename: The name of the file to be searched.
- pattern: The pattern we are searching for in the file.
"""
time_taken = timeit(setup=BENCHMARK_SETUP, number=SINGLE_STRING_TESTS,
stmt='subprocess.call(%s)' % GREP_CALL_ARGS.format(pattern, filename))
return time_taken / SINGLE_STRING_TESTS
|
f1d3a4b9f6d5f7867f49a6eb3bdc6236111d5277
| 3,647,567
|
import pathlib
def inotify_test(
test_paths: dict[str, pathlib.Path], tmp_path: pathlib.Path
) -> InotifyTest:
"""Generate a pre-configured test instance of `inotify_simple.INotify`.
Parameters
----------
test_paths: dict[str, pathlib.Path]
The test fixture that generates test files based on configuration
(:obj:`test_paths`).
tmp_path: pathlib.Path
The pytest `tmp_path` fixture providing a path object to a temporary
directory which is unique to each test function
(:obj:`_pytest.tmpdir.tmp_path`).
Returns
-------
    inotify: InotifyTest
A pre-configured `InotifyTest` object with the specified test paths.
"""
inotify = InotifyTest(tmp_path)
    for path in test_paths.values():
inotify.add_watch(path)
return inotify
|
e64975dc2765e3c887194cbf88a0f47ef3d5311e
| 3,647,568
|
def set_system_bios( context, settings, system_id = None ):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system( context, system_id )
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings object
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get( bios_uri )
etag = bios.getheader( "ETag" )
if "@Redfish.Settings" in bios.dict:
bios_uri = bios.dict["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
bios_settings = context.get( bios_uri )
etag = bios_settings.getheader( "ETag" )
# Update the settings
payload = { "Attributes": settings }
headers = None
if etag is not None:
headers = { "If-Match": etag }
response = context.patch( bios_uri, body = payload, headers = headers )
verify_response( response )
return response
|
68ceeb63ec74f3459f8cfea1eb6eb9d668bff15e
| 3,647,569
|
def create() -> UserSecurityModel:
"""
Creates a new instance of the USM
"""
return UserSecurityModel()
|
1e07d9bc6359a2ca000b886de416147d85720c9c
| 3,647,570
|
def clDice(v_p, v_l):
"""[this function computes the cldice metric]
Args:
v_p ([bool]): [predicted image]
v_l ([bool]): [ground truth image]
Returns:
[float]: [cldice metric]
"""
    if len(v_p.shape) == 2:
        tprec = cl_score(v_p, skeletonize(v_l))
        tsens = cl_score(v_l, skeletonize(v_p))
    elif len(v_p.shape) == 3:
        tprec = cl_score(v_p, skeletonize_3d(v_l))
        tsens = cl_score(v_l, skeletonize_3d(v_p))
    else:
        raise ValueError("clDice expects 2D or 3D inputs")
    return 2*tprec*tsens/(tprec+tsens)
|
f8a6947ca1487878e9e33c5c7aed3604565801e3
| 3,647,571
|
import re
def validate_regex(regex_str):
"""
Checks if a given string is valid regex
    :param str regex_str: a suspicious string that may or may not be valid regex
:rtype: bool
:return: True if valid regex was give, False in case of TypeError or re.error
"""
    # another of those super basic functions where I am not sure if there isn't an easier way
try:
re.compile(regex_str)
return True
except re.error:
return False
except TypeError: # for the string not being one
return False
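# Example:
#   >>> validate_regex(r"[a-z]+\d*")
#   True
#   >>> validate_regex("[unclosed")
#   False
#   >>> validate_regex(None)
#   False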
|
97c6e2338eb67c2d4be74e3a18a4393a1eb36242
| 3,647,572
|
import json
from os.path import isfile
def load_stats_from_file(date):
"""
Load stats data from a stat file.
Params:
date -- a `datetime` instance.
"""
file_path = _build_stats_file_path(date)
if not isfile(file_path):
raise IOError # This will be FileNotFoundError in Python3.
with open(file_path, 'r') as fin:
return json.loads(fin.read())
|
b2bb85f6a492ca26441271222f10373e200497e1
| 3,647,573
|
def null_gt_null(left, right):
""":yaql:operator >
Returns false. This function is called when left and right are null.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null > null
false
"""
return False
|
f99a985ae1b0e678afb315ed441d33064dd281b0
| 3,647,574
|
def read_header(file):
""" Read the information in an OpenFOAM file header.
Parameters
----------
file : str
Name (path) of OpenFOAM file.
Returns
-------
info : dictionary
The information in the file header.
"""
with open(file, 'r') as f:
content = f.read()
info = {}
info['file'] = file
# read logo
logo_info = _read_logo(content)
info['foam_version'] = logo_info['Version']
info['website'] = logo_info['Website']
# read header
header_info = _read_header_info(content)
info['foam_class'] = header_info['foam_class']
info['name'] = header_info['name']
info['location'] = header_info['location']
return info
|
91446555ed31953ea4290e76db51872eb1ef3ae9
| 3,647,575
|
def point_from_b58(b):
"""Return b58 decoded P."""
x, y = [int_from_b58(t) for t in b.split(",")]
return ECC.EccPoint(x=x, y=y, curve=CURVE)
|
4f5b9dfe60c745b17ffb54535a8994273d07c675
| 3,647,576
|
def _cp_embeds_into(cp1, cp2):
"""Check that any state in ComplexPattern2 is matched in ComplexPattern1.
"""
# Check that any state in cp2 is matched in cp1
# If the thing we're matching to is just a monomer pattern, that makes
# things easier--we just need to find the corresponding monomer pattern
# in cp1
if cp1 is None or cp2 is None:
return False
cp1 = as_complex_pattern(cp1)
cp2 = as_complex_pattern(cp2)
if len(cp2.monomer_patterns) == 1:
mp2 = cp2.monomer_patterns[0]
# Iterate over the monomer patterns in cp1 and see if there is one
# that has the same name
for mp1 in cp1.monomer_patterns:
if _mp_embeds_into(mp1, mp2):
return True
    # complexes with more than one monomer pattern are not handled here
    return False
|
67e410eb3ba1131f144829b724ad7099807d4e4e
| 3,647,577
|
def get_tags_for_message(khoros_object, msg_id):
"""This function retrieves the tags for a given message.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param msg_id: The Message ID for the message from which to retrieve tags
:type msg_id: str, int
:returns: A list of tags associated with the message
"""
tag_list = []
query = f"SELECT text FROM tags WHERE messages.id = '{msg_id}'" # nosec
response = liql.perform_query(khoros_object, liql_query=query, verify_success=True)
entries = api.get_items_list(response)
for entry in entries:
tag_list.append(entry['text'])
return tag_list
|
563df4344f9291d9114450a994145610ef79ae8f
| 3,647,578
|
def _build_hierarchical_histogram_computation(
lower_bound: float, upper_bound: float, num_bins: int,
aggregation_factory: factory.UnweightedAggregationFactory):
"""Utility function creating tff computation given the parameters and factory.
Args:
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
aggregation_factory: The aggregation factory used to construct the federated
computation.
Returns:
A tff federated computation function.
"""
@computations.tf_computation(computation_types.SequenceType(tf.float32))
def client_work(client_data):
return _discretized_histogram_counts(client_data, lower_bound, upper_bound,
num_bins)
aggregator = aggregation_factory.create(client_work.type_signature.result)
@computations.federated_computation(
computation_types.at_clients(client_work.type_signature.parameter))
def hierarchical_histogram_computation(federated_client_data):
# Work done at clients.
client_histogram = intrinsics.federated_map(client_work,
federated_client_data)
# Aggregation to server.
return aggregator.next(aggregator.initialize(), client_histogram).result
return hierarchical_histogram_computation
|
38d5c711bcd6d6cd8965f7e8e85b0933363a2a7b
| 3,647,579
|
import inspect
from functools import wraps
def check_endpoint(func):
"""Check available endpoint."""
@wraps(func)
def wrapper(*args, **kwargs):
sig = inspect.signature(func)
args_value = sig.bind(*args, **kwargs)
endpoint = args_value.arguments["endpoint"]
if endpoint not in AVAILABLE_ENDPOINTS:
raise ClientException(f"Unavailable endpoints: {endpoint}")
return func(*args, **kwargs)
return wrapper
|
1e833dc8c3d43b6c09bd2b3bc89846ce29952cbd
| 3,647,580
|
def read_sql_one(id):
"""
This function responds to a request for api/reviews/{id}
with one matching review from reviews
:param id: id of the review
:return: review matching the id
"""
response = Response.query.filter_by(id=id).one_or_none()
if response is not None:
# serialize the data for the response
response_schema = ResponseSchema()
return response_schema.dump(response).data
else:
abort(404, f"Review {id} not found.")
|
d54abca40fb6d44adf0988bc44484da3af3efb22
| 3,647,581
|
from typing import Tuple
def ds_to_numpy(ds: Dataset) -> Tuple[np.ndarray, np.ndarray]:
"""Transform torch dataset to numpy arrays
Parameters
----------
ds : Dataset
COVID dataset
Returns
-------
Tuple[np.ndarray, np.ndarray]
Flattened images + labels
"""
imgs = []
labels = []
for img, label in ds:
imgs.append(img.detach().cpu().numpy().flatten()[np.newaxis, ])
labels.append(label)
return np.concatenate(imgs), np.array(labels)
|
218eaf582b36a562920bc2e8808b3524a900b8ef
| 3,647,582
|
import base64
def _b64(b):
"""Helper function base64 encode for jose spec."""
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
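# Example: url-safe base64 with the '=' padding stripped, as the JOSE spec
# requires.
#   >>> _b64(b"hello")
#   'aGVsbG8'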
|
4777d4f47de2c72b8dd95b765fc54d1abc6763f0
| 3,647,583
|
def load_from_arff(filename, label_count, label_location="end",
input_feature_type='float', encode_nominal=True, load_sparse=False,
return_attribute_definitions=False):
"""Method for loading ARFF files as numpy array
Parameters
----------
filename : str
path to ARFF file
    label_count: integer
        number of labels in the ARFF file
    label_location: str {"start", "end"} (default is "end")
        whether the ARFF file contains labels at the beginning of the
        attributes list ("start", MEKA format)
        or at the end ("end", MULAN format)
    input_feature_type: numpy.type as string (default is "float")
        the desired type of the contents of the returned 'X' array-like,
        default 'i8', should be a numpy type,
        see http://docs.scipy.org/doc/numpy/user/basics.types.html
    encode_nominal: bool (default is True)
        whether to convert categorical data into numeric factors - required
        for some scikit classifiers that can't handle non-numeric
        input features.
load_sparse: boolean (default is False)
whether to read arff file as a sparse file format, liac-arff
breaks if sparse reading is enabled for non-sparse ARFFs.
return_attribute_definitions: boolean (default is False)
whether to return the definitions for each attribute in the
dataset
Returns
-------
X : :mod:`scipy.sparse.lil_matrix` of `input_feature_type`, shape=(n_samples, n_features)
input feature matrix
y : :mod:`scipy.sparse.lil_matrix` of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
names of attributes : List[str]
list of attribute names from ARFF file
"""
if not load_sparse:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.DENSE
)
matrix = sparse.csr_matrix(
arff_frame['data'], dtype=input_feature_type
)
else:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.COO
)
data = arff_frame['data'][0]
row = arff_frame['data'][1]
col = arff_frame['data'][2]
matrix = sparse.coo_matrix(
(data, (row, col)), shape=(max(row) + 1, max(col) + 1)
)
if label_location == "start":
X, y = matrix.tocsc()[:, label_count:].tolil(), matrix.tocsc()[:, :label_count].astype(int).tolil()
feature_names = arff_frame['attributes'][label_count:]
label_names = arff_frame['attributes'][:label_count]
elif label_location == "end":
X, y = matrix.tocsc()[:, :-label_count].tolil(), matrix.tocsc()[:, -label_count:].astype(int).tolil()
feature_names = arff_frame['attributes'][:-label_count]
label_names = arff_frame['attributes'][-label_count:]
else:
        # unknown label_location value
return None
if return_attribute_definitions:
return X, y, feature_names, label_names
else:
return X, y
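# Usage sketch (hypothetical path; assumes a MULAN-style ARFF file with six
# label attributes at the end of the attribute list):
#
#   X, y = load_from_arff("scene.arff", label_count=6, label_location="end")
#   X, y, feat_names, label_names = load_from_arff(
#       "scene.arff", label_count=6, return_attribute_definitions=True)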
|
d203b6360d3212e7e6a37f0ff434e17dfacfe6a0
| 3,647,584
|
def gapfill_to_ensemble(model, iterations=1, universal=None, lower_bound=0.05,
penalties=None, exchange_reactions=False,
demand_reactions=False, integer_threshold=1e-6):
"""
Performs gapfilling on model, pulling reactions from universal.
Any existing constraints on base_model are maintained during gapfilling, so
these should be set before calling gapfill_to_ensemble (e.g. secretion of
metabolites, choice of objective function etc.).
Currently, only iterative solutions are supported with accumulating
penalties (i.e. after each iteration, the penalty for each reaction
doubles).
Parameters
----------
    model : cobra.Model
        The model to perform gap filling on.
    iterations : int, 1
        The number of gapfilling iterations to perform; penalties accumulate
        after each iteration.
universal : cobra.Model
A universal model with reactions that can be used to complete the
model.
lower_bound : float, 0.05
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
integer_threshold : float, 1e-6
The threshold at which a value is considered non-zero (aka
integrality threshold). If gapfilled models fail to validate,
you may want to lower this value. However, picking a threshold that is
too low may also result in reactions being added that are not essential
to meet the imposed constraints.
exchange_reactions : bool, False
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool, False
Consider adding demand reactions for all metabolites.
Returns
-------
ensemble : medusa.core.Ensemble
The ensemble object created from the gapfill solutions.
"""
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions,
integer_threshold=integer_threshold)
solutions = gapfiller.fill(iterations=iterations)
print("finished gap-filling. Constructing ensemble...")
ensemble = _build_ensemble_from_gapfill_solutions(model,solutions,
universal=universal)
return ensemble
|
1e5b2c6e413afc1b745867f931d4fbc7c33babcc
| 3,647,585
|
import torch
def reparameterize(mu, logvar, n_samples=1):
"""Reparameterization trick.
Args:
mu (torch.Tensor): Mean.
        logvar (torch.Tensor): Logarithm of the variance.
n_samples (int): The number of samples.
Returns:
torch.Tensor: Samples drawn from the given Gaussian distribution.
The shape is equal to mu if n_samples is 1,
and (n_samples, *mu.shape) if n_samples is larger than 1.
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn(n_samples, *std.size(), device=std.device)
z = mu + eps * std
return z.squeeze(0)
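# Example shapes: with n_samples=1 the leading sample dimension is squeezed
# away; otherwise it is kept.
#
#   mu, logvar = torch.zeros(4, 8), torch.zeros(4, 8)
#   reparameterize(mu, logvar).shape                # torch.Size([4, 8])
#   reparameterize(mu, logvar, n_samples=5).shape   # torch.Size([5, 4, 8])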
|
726473147ee28f470ad7d543e2b36bc512ffd0ae
| 3,647,586
|
def rotationMatrixFromNormals(v0,v1,tol=1e-20):
"""
Performs the minimum number of rotations to define a rotation from the direction indicated by the vector n0 to the direction indicated by n1.
The axis of rotation is n0 x n1
https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
:param numpy.array v0: vector of length 3
:param numpy.array v1: vector of length 3
:param tol = 1e-20: tolerance. If the norm of the cross product between the two vectors is below this, no rotation is performed
:rtype: numpy.array, 3x3
:return: rotation matrix which rotates the frame so that n0 is aligned with n1
"""
# ensure both n0, n1 are vectors of length 1
assert len(v0) == 3, "Length of n0 should be 3"
assert len(v1) == 3, "Length of n1 should be 3"
# ensure both are true normals
n0 = v0*1./np.linalg.norm(v0)
n1 = v1*1./np.linalg.norm(v1)
n0dotn1 = n0.dot(n1)
# define the rotation axis, which is the cross product of the two vectors
rotAx = crossProd(n0,n1)
if np.linalg.norm(rotAx) < tol:
return np.eye(3,dtype=float)
rotAx *= 1./np.linalg.norm(rotAx)
cosT = n0dotn1/(np.linalg.norm(n0)*np.linalg.norm(n1))
sinT = np.sqrt(1.-n0dotn1**2)
ux = np.array([[0., -rotAx[2], rotAx[1]], [rotAx[2], 0., -rotAx[0]], [-rotAx[1], rotAx[0], 0.]],dtype=float)
return np.eye(3,dtype=float) + sinT*ux + (1.-cosT)*(ux.dot(ux))
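# Quick check (assumes crossProd is an np.cross-style helper): rotating the
# z-axis onto the x-axis should map v0 exactly to v1.
#
#   v0 = np.array([0., 0., 1.])
#   v1 = np.array([1., 0., 0.])
#   R = rotationMatrixFromNormals(v0, v1)
#   np.allclose(R.dot(v0), v1)   # True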
|
946110994a3567871df4b60a3c6814f9ab092ad1
| 3,647,587
|
def P_to_array(P: NestedDicts) -> np.ndarray:
""" Converts a transition matrix in nested dictionary format to a numpy array.
P is usually given as starting state -> action -> ending state w/ data, we reorder this to
action -> starting state -> ending state -> transition probability.
"""
# Action, Starting State, Ending State, value is probability
out = np.zeros(shape=(len(P[0]), len(P), len(P)))
for start_state, actions in P.items():
for action, results in actions.items():
for prob, end_state, _, __ in results:
out[action, start_state, end_state] += prob
return out
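# Example with a tiny gym-style transition dict: one state, two actions.
#
#   P = {0: {0: [(1.0, 0, 0.0, False)],
#            1: [(0.7, 0, 0.0, False), (0.3, 0, 0.0, True)]}}
#   P_to_array(P)   # shape (2, 1, 1); each action's probabilities sum to 1.0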
|
3a107b3cff6b46b8afc93705bebef84bcbcad6ca
| 3,647,588
|
def get_available_smc_versions():
"""
    Return the list of available SMC image repo tags. SMC versioning is
    encoded after the colon in each tag, e.g. d70/smc:v6.1.2.
"""
return [repotag for image in get_images(filter='d70/smc')
for repotag in image.get('RepoTags')]
|
3ddb2908501ebf2ce648f7ebfe00000eb429ffad
| 3,647,589
|
def boolean_fn2(a, b, c):
""" Return the truth value of (a ∧ b) ∨ (-a ∧ -b) """
return a and b or not a and not b
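# Note: this is the logical XNOR of a and b (the c argument is unused); for
# booleans it is equivalent to `a == b`.
#   >>> all(boolean_fn2(a, b, None) == (a == b)
#   ...     for a in (True, False) for b in (True, False))
#   True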
|
c1ef37b3503866e9460fb95c4ab609278c6cff52
| 3,647,590
|
from utils.ica_base import get_configuration
def get_ica_gds_configuration() -> libgds.Configuration:
"""
Get the configuration object for ica wes
:return:
"""
return get_configuration(libgds.Configuration)
|
9e2efd47bca098fb8a03dd4412269a18663e8dfa
| 3,647,591
|
import time
import cv2
import numpy as np
import torch
def retry_load_images(image_paths, retry=10, backend="pytorch"):
"""
This function is to load images with support of retrying for failed load.
Args:
image_paths (list): paths of images needed to be loaded.
retry (int, optional): maximum time of loading retrying. Defaults to 10.
backend (str): `pytorch` or `cv2`.
Returns:
imgs (list): list of loaded images.
"""
for i in range(retry):
imgs = []
for image_path in image_paths:
with g_pathmgr.open(image_path, "rb") as f:
img_str = np.frombuffer(f.read(), np.uint8)
img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
imgs.append(img)
if all(img is not None for img in imgs):
if backend == "pytorch":
imgs = torch.as_tensor(np.stack(imgs))
return imgs
else:
logger.warn("Reading failed. Will retry.")
time.sleep(1.0)
if i == retry - 1:
raise Exception("Failed to load images {}".format(image_paths))
|
5a34ababc157548c6d9f673c3ff0934df9eccb3d
| 3,647,592
|
def b2p(exts):
"""Convert two points of a polygon into its bounding box.
(Rectangular polygon parallel with axes.)
"""
p0x = exts[0][0]
p0y = exts[0][1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = exts[0][2]
p1y = exts[0][3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
i = []
if exts[1] is not None:
for h in exts[1]:
p0x = h[0]
p0y = h[1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = h[2]
p1y = h[3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
return e, i
|
11a51cffb8143b01b60904bef4c92e6f7335dc1d
| 3,647,593
|
import re
def read_conf_file_interface(config_name):
"""
Get interface settings.
@param config_name: Name of WG interface
@type config_name: str
@return: Dictionary with interface settings
@rtype: dict
"""
conf_location = WG_CONF_PATH + "/" + config_name + ".conf"
with open(conf_location, 'r', encoding='utf-8') as file_object:
file = file_object.read().split("\n")
data = {}
for i in file:
if not regex_match("#(.*)", i):
if len(i) > 0:
if i != "[Interface]":
tmp = re.split(r'\s*=\s*', i, 1)
if len(tmp) == 2:
data[tmp[0]] = tmp[1]
return data
|
7f51585d05472fa7fbc26e89b150e540f7013be1
| 3,647,595
|
import re
def book_transformer(query_input, book_dict_input):
"""grabs the book and casts it to a list"""
sample_version = versions_dict.versions_dict()
query_input[1] = query_input[1].replace('[', '').replace(']', '').lstrip().rstrip().upper()
for i in list(book_dict_input.keys()):
result = re.search(i, query_input[1])
if result is not None:
book = book_dict_input[result.group(0)]
reduced_query = query_input[1].replace(result.group(0), '')
return [query_input[0], book, reduced_query]
    return [sample_version['KJV'], 'error: book not found']
|
259e5520aa762749169b0d529c8f1e8836815a16
| 3,647,596
|
import json
from flask import Response
def custom_response(message, status, mimetype):
"""handle custom errors"""
resp = Response(json.dumps({"message": message, "status_code": status}),
status=status,
mimetype=mimetype)
return resp
|
6ec8aa2784e6dd0420c3d246ab5a2a2b6e20db1e
| 3,647,597
|
def embed_network(input_net, layers, reuse_variables=False):
"""Convolutional embedding."""
    n_layers = len(layers) // 3
tf.logging.info('Number of layers: %d' % n_layers)
# set normalization and activation functions
normalizer_fn = None
activation_fn = tf.nn.softplus
tf.logging.info('Softplus activation')
net = input_net
for ilayer in range(n_layers):
tf.logging.info('Building layer: %d, %d, %d'
% (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
int(layers[ilayer*3 + 2])))
net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
int(layers[ilayer*3]),
stride=int(layers[ilayer*3 + 2]),
scope='layer_wt_%d' % ilayer,
reuse=reuse_variables,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn)
return net
|
d525dbf59ce860af6e0bc0de6c21fa55454c3f55
| 3,647,598
|
def sample_variance(sample1, sample2):
"""
    Calculate the pooled variance of two samples. After learn.co
"""
n_1, n_2 = len(sample1), len(sample2)
var_1, var_2 = variance(sample1), variance(sample2)
return (var_1 + var_2)/((n_1 + n_2)-2)
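# Worked example (assumes, per the learn.co lesson, that the companion
# `variance` helper returns the sum of squared deviations about the mean):
# for sample1 = [1, 2, 3] and sample2 = [2, 4, 6], var_1 = 2 and var_2 = 8,
# so the pooled variance is (2 + 8) / (3 + 3 - 2) = 2.5.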
|
e464ac7434139409a430341bb39b107d9a15eacf
| 3,647,599
|
def calculate_area(geometry):
"""
Calculate geometry area
:param geometry: GeoJSON geometry
:return: the geometry area
"""
coords = get_coords_from_geometry(
geometry, ["Polygon", "MultiPolygon"], raise_exception=False
)
if get_input_dimensions(coords) >= 4:
areas = list(map(lambda sub_item: calculate_area(sub_item), coords))
return sum(areas)
elif get_input_dimensions(coords) == 3:
polygon(coords)
return polygon_area(coords)
else:
return 0
|
6bc08b57c3416c14f5eca00acbe914a06053b81e
| 3,647,601
|
from pathlib import Path
import re
def read_prb(file):
"""
Read a PRB file and return a ProbeGroup object.
    Since the PRB format does not describe contact shape, 5 um circles are
    used; likewise, a dummy 'tip' probe shape is created.
    The PRB format does not contain any information about the channels of the
    probe; only the channel index on the device is given.
"""
file = Path(file).absolute()
assert file.is_file()
with file.open('r') as f:
contents = f.read()
contents = re.sub(r'range\(([\d,]*)\)', r'list(range(\1))', contents)
prb = {}
exec(contents, None, prb)
prb = {k.lower(): v for (k, v) in prb.items()}
if 'channel_groups' not in prb:
raise ValueError('This file is not a standard PRB file')
probegroup = ProbeGroup()
for i, group in prb['channel_groups'].items():
probe = Probe(ndim=2, si_units='um')
chans = np.array(group['channels'], dtype='int64')
positions = np.array([group['geometry'][c] for c in chans],
dtype='float64')
probe.set_contacts(positions=positions, shapes='circle',
shape_params={'radius': 5})
probe.create_auto_shape(probe_type='tip')
probe.set_device_channel_indices(chans)
probegroup.add_probe(probe)
return probegroup
|
f46c8befbd348c1473867d5c7475911ce960830c
| 3,647,602
|
def CreateNode(parent, node_type, position, wx_id):
""" Create an instance of a node associated with the specified name.
:param parent: parent of the node object (usually a wx.Window)
:param node_type: type of node from registry - the IDName
:param position: default position for the node
:param wx_id: id for the node. Usually an id generated by wxPython.
:returns: Node object
:raises: NodeNotFoundError if the node is not registered in the Node Registry
"""
if node_type in REGISTERED_NODES:
# Initialize the base class here so that a new instance
# is created for each node. We also set some important
# values for the position and type of the node.
node = REGISTERED_NODES[node_type]
node = node(wx_id)
node.SetPosition(position)
node.Model.SetType(node_type)
node.Model.SetParent(parent)
return node
else:
raise exceptions.NodeNotFoundError(node_type)
|
d0a68d584bde29ef47b8e1a1a3129261cbdb6df4
| 3,647,604
|
import json
def decode_json_content(content):
"""
Decodes a given string content to a JSON object
:param str content: content to be decoded to JSON.
:return: A JSON object if the string could be successfully decoded and None otherwise
:rtype: json or None
"""
try:
return json.loads(content) if content is not None else None
    except json.JSONDecodeError:
print("The given content could not be decoded as a JSON file")
return None
|
b0a65734876fd012feb89c606a9c7a0dced866b6
| 3,647,605
|
import matplotlib.pyplot as plt
import seaborn as sns
def plot_dist(noise_feats, label=None, ymax=1.1, color=None, title=None, save_path=None):
"""
Kernel density plot of the number of noisy features included in explanations,
for a certain number of test samples
"""
if not any(noise_feats): # handle special case where noise_feats=0
noise_feats[0] = 0.5
# plt.switch_backend("agg")
sns.set_style('darkgrid')
ax = sns.distplot(noise_feats, hist=False, kde=True,
kde_kws={'label': label}, color=color)
plt.xlim(-3, 11)
plt.ylim(ymin=0.0, ymax=ymax)
if title:
plt.title(title)
if save_path:
plt.savefig(save_path)
return ax
|
33623c77434b936a730064890a80d34d1f5ac143
| 3,647,606
|
import chunk
def simple_simulate(choosers, spec, nest_spec,
skims=None, locals_d=None,
chunk_size=0, custom_chooser=None,
log_alt_losers=False,
want_logsums=False,
estimator=None,
trace_label=None, trace_choice_name=None, trace_column_names=None):
"""
Run an MNL or NL simulation for when the model spec does not involve alternative
specific data, e.g. there are no interactions with alternative
properties and no need to sample from alternatives.
"""
trace_label = tracing.extend_trace_label(trace_label, 'simple_simulate')
assert len(choosers) > 0
result_list = []
# segment by person type and pick the right spec for each person type
for i, chooser_chunk, chunk_trace_label \
in chunk.adaptive_chunked_choosers(choosers, chunk_size, trace_label):
choices = _simple_simulate(
chooser_chunk, spec, nest_spec,
skims=skims,
locals_d=locals_d,
custom_chooser=custom_chooser,
log_alt_losers=log_alt_losers,
want_logsums=want_logsums,
estimator=estimator,
trace_label=chunk_trace_label,
trace_choice_name=trace_choice_name,
trace_column_names=trace_column_names)
result_list.append(choices)
        chunk.log_df(trace_label, 'result_list', result_list)
if len(result_list) > 1:
choices = pd.concat(result_list)
    assert len(choices.index) == len(choosers.index)
return choices
|
92e06a6b57add21e0b3bd1fcd537d6818786d19c
| 3,647,607
|
from functools import wraps
def state(predicate):
"""DBC helper for reusable, simple predicates for object-state tests used in both preconditions and postconditions"""
@wraps(predicate)
def wrapped_predicate(s, *args, **kwargs):
return predicate(s)
return wrapped_predicate
|
0c9116ccd3fba1b431ce0a492bc6337406954cd8
| 3,647,608
|
import math
def dpp(kernel_matrix, max_length, epsilon=1E-10):
"""
Our proposed fast implementation of the greedy algorithm
:param kernel_matrix: 2-d array
:param max_length: positive int
:param epsilon: small positive scalar
:return: list
"""
item_size = kernel_matrix.shape[0]
cis = np.zeros((max_length, item_size))
di2s = np.copy(np.diag(kernel_matrix))
selected_items = list()
selected_item = np.argmax(di2s)
selected_items.append(selected_item)
while len(selected_items) < max_length:
k = len(selected_items) - 1
ci_optimal = cis[:k, selected_item]
di_optimal = math.sqrt(di2s[selected_item])
elements = kernel_matrix[selected_item, :]
eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal
cis[k, :] = eis
di2s -= np.square(eis)
di2s[selected_item] = -np.inf
selected_item = np.argmax(di2s)
if di2s[selected_item] < epsilon:
break
selected_items.append(selected_item)
return selected_items
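# Usage sketch: build a PSD Gram kernel from random unit feature vectors and
# greedily pick a diverse subset (fast greedy MAP inference for DPPs).
#
#   rng = np.random.default_rng(0)
#   feats = rng.normal(size=(100, 16))
#   feats /= np.linalg.norm(feats, axis=1, keepdims=True)
#   kernel_matrix = feats @ feats.T                 # PSD, unit diagonal
#   selected = dpp(kernel_matrix, max_length=10)    # indices of diverse items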
|
fd6c141f1a2f80971ed8e6e5d36b0d074bcdc4b9
| 3,647,609
|
import cStringIO
from PIL import Image
def adjust_image_resolution(data):
    """Given image data, shrink it so its larger dimension is no greater
    than 240 pixels and re-encode it as JPEG."""
inputbytes = cStringIO.StringIO(data)
output = cStringIO.StringIO()
try:
im = Image.open(inputbytes)
im.thumbnail((240, 240), Image.ANTIALIAS)
# could run entropy check to see if GIF makes more sense given an item.
im.save(output, 'JPEG')
except IOError:
return None
return output.getvalue()
|
d2fedb68e79b1aed0ce0a209d43bb6b16d492f16
| 3,647,610
|
from datetime import datetime
def parse_date(txt):
""" Returns None or parsed date as {h, m, D, M, Y}. """
date = None
clock = None
for word in txt.split(' '):
if date is None:
try:
date = datetime.strptime(word, "%d-%m-%Y")
continue
except ValueError:
pass
try:
date = datetime.strptime(word, "%d.%m.%Y")
continue
except ValueError:
pass
if clock is None:
try:
clock = datetime.strptime(word, "%H:%M")
continue
except ValueError:
pass
if date is not None and clock is not None:
return {'h': clock.hour,
'm': clock.minute,
'D': date.day,
'M': date.month,
'Y': date.year}
return None
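# Example:
#   >>> parse_date("meeting on 24.12.2021 at 18:30")
#   {'h': 18, 'm': 30, 'D': 24, 'M': 12, 'Y': 2021}
#   >>> parse_date("no usable tokens here") is None
#   True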
|
80660673d6b4179fa7b4907983ed84bc41c4189b
| 3,647,612
|
def calc_angle(m, n):
"""
Calculate the cosθ,
where θ is the angle between 2 vectors, m and n.
"""
    if inner_p_s(m, n) == -1:
        print('Error! The 2 vectors should belong on the same space Rn!')
    elif inner_p_s(m, n) == 0:
        print('The cosine of the two vectors is 0, so these vectors are orthogonal!')
        return 0.0
    else:
        angle = (inner_p_s(m, n))/(calc_norm(m) * calc_norm(n))
        return angle
|
e0361370a9479eaf7e706673d71c88d25c110473
| 3,647,615
|
import numpy as np
def Seuil_var(img):
    """
    Compute a threshold value. First the image's histogram is calculated; the
    threshold is set to the first histogram index i which satisfies the
    following criteria: DH > 0, DH(i)/H(i) > 0.1, and H(i) < 0.01% of the norm.
    In : img : IPL image : image to be processed
    Out: seuil : int : value of the threshold
    """
dim=255
    Norm = np.asarray(img[:]).shape[0]*np.asarray(img[:]).shape[1]
bins= [float(x) for x in range(dim)]
hist,bin_edges = np.histogram(np.asarray(img[:]), bins)
Norm = Norm -hist[0]
    i = 1
som = 0
while (som < 0.8*Norm and i <len(hist)-1):
som = som + hist[i]
i=i+1
while ((hist[i]-hist[i-1] < 0 or (hist[i]-hist[i-1])/hist[i-1]>0.1 or hist[i]> 0.01*Norm ) and i < len(hist)-1):
i=i+1
    # if no index satisfies the criteria, i falls through to len(hist)-1
    seuil = i
    return seuil
|
435e8eeca0ddff618a2491b0529f1252d8566721
| 3,647,616
|
from struct import pack
from tempfile import NamedTemporaryFile
from numpy import load
def convert_numpy(file_path, dst=None, orient='row', hold=False, axisf=False, *arg):
"""
Extract an array of data stored in a .npy file or DATABLOCK
Parameters
---------
file_path : path (str)
Full path to the file to be extracted.
dst : str
Full path to the file where data will be appended as bytes.
In the case of None value, a temporary file is created and the path is returned.
orient : str
orientation of the spectra in the file. Defaults to spectra as row.
hold : bool
If true, limits parts of the code to only get data type and parameters. (faster)
axisf : bool
Extracts the 1st axis and set it as the file axis as it is being converted.
Return
------
Asgard_param : dict
Stores update to Asgard parameters (i.e. spec_amount, spec_len, Axis for sif)
dst : path (str)
Full path to the file where data were writen, may it be temporary or user selected
"""
    if dst is None and hold is False:
        dst = NamedTemporaryFile('wb', delete=False).name
try :
arr = load(file_path, allow_pickle=True, mmap_mode='r')
except ValueError :
raise Exc.FileFormatError('Selected file is not a valid numpy array')
if orient != 'row' :
arr = arr.T
if len(arr.shape) == 1:
arr = arr.reshape([1, arr.shape[0]])
if len(arr.shape) != 2 :
raise Exc.FileFormatError('Selected file contains an array with more than 2 dimensions')
Asgard_param = {'Spec len':arr.shape[1], 'Spec amount':arr.shape[0]}
if hold is True :
if axisf is True :
Asgard_param['Spec amount'] -= 1
axis = arr[0,:]
return Asgard_param, axis
else :
return Asgard_param
else :
        with open(dst, 'ab') as f:
            for spec in range(arr.shape[0]):
                if axisf is True and spec == 0:
                    # the first row holds the axis; do not write it as data
                    Asgard_param['Spec amount'] -= 1
                    axis = arr[spec, :]
                else:
                    for pix in arr[spec, :]:
                        f.write(bytearray(pack('f', pix)))
if axisf is True :
return dst, Asgard_param, axis
else :
return dst, Asgard_param
|
2ac1b25277b466cdcd5c6d78844a7bccee9817a6
| 3,647,617
|
def index():
"""Every time the html page refreshes this function is called.
Checks for any activity from the user (setting an alarm, deleting an alarm,
or deleting a notification)
:return: The html template with alarms and notifications added
"""
notification_scheduler.run(blocking=False)
# get the inputs from the users alarm submission
alarm_time = request.args.get("alarm")
alarm_title = request.args.get("two")
alarm_news = request.args.get("news")
alarm_weather = request.args.get("weather")
check_for_delete_request()
if alarm_title and alarm_time:
alarm = {"alarm time": str(alarm_time), "title": str(alarm_title), "content": "",
"weather": alarm_weather is not None, "news": alarm_news is not None}
notification_delay = get_notification_delay(alarm["alarm time"])
# if the notification delays is negative then it is set in the past which is invalid
if notification_delay > 0 and valid_alarm_title(alarm["title"]):
alarm_date_time = alarm_time.split("T")
alarm["content"] = format_alarm_content(alarm_date_time, alarm_news, alarm_weather)
notification_scheduler.enter(notification_delay, len(notifications),
set_off_alarms, (alarm,))
log.info("Alarm set: %s", alarm)
log.info("Delay for alarm: %d seconds", notification_delay)
alarms.append(alarm)
else:
log.error("INVALID ALARM: %s", alarm)
return render_template('index.html', title='Daily update', alarms=alarms,
notifications=notifications, image="alarm_clock.jpg",
favicon="static/images/favicon.jpg")
|
845ba53918bb44d3170a2e93e93346212ccc1247
| 3,647,618
|
import json
import time
def check_icinga_should_run(state_file: str) -> bool:
"""Return True if the script should continue to update the state file, False if the state file is fresh enough."""
try:
with open(state_file) as f:
state = json.load(f)
except Exception as e:
logger.error('Failed to read Icinga state from %s: %s', state_file, e)
return True
    delta = time.time() - state['timestamp']
    logger.info('Last run was %d seconds ago with exit code %d', delta, state['exit_code'])
    if state['exit_code'] == 0:
        max_age = ICINGA_RUN_EVERY_MINUTES * 60
    else:
        max_age = ICINGA_RETRY_ON_FAILURE_MINUTES * 60
    if delta > max_age:
        return True
    logger.info('Skipping')
    return False
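# Companion sketch (assumed format) for producing the state file that
# check_icinga_should_run() reads; timestamp/exit_code match the keys above.
import json
import time

def write_icinga_state(state_file: str, exit_code: int) -> None:
    with open(state_file, 'w') as f:
        json.dump({'timestamp': time.time(), 'exit_code': exit_code}, f)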
|
d508f000eb28da42b43049f49ac180702d49bdc7
| 3,647,619
|
def ln_new_model_to_gll(py, new_flag_dir, output_dir):
"""
    Build the shell command that links the new model (OUTPUT_MODEL) into the gll directory.
"""
script = f"{py} -m seisflow.scripts.structure_inversion.ln_new_model_to_gll --new_flag_dir {new_flag_dir} --output_dir {output_dir}; \n"
return script
|
acdf28cbc2231bd2f33ae418136ce7da0fce421f
| 3,647,620
|
def deserialize_item(item: dict):
"""Deserialize DynamoDB item to Python types.
Args:
item: item to deserialize
    Returns:
        deserialized item
"""
return {k: DDB_DESERIALIZER.deserialize(v) for k, v in item.items()}
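# Minimal sketch assuming DDB_DESERIALIZER is boto3's TypeDeserializer, the
# usual choice for this pattern; the item below is illustrative.
from boto3.dynamodb.types import TypeDeserializer

DDB_DESERIALIZER = TypeDeserializer()
item = {'id': {'S': 'abc'}, 'count': {'N': '3'}, 'active': {'BOOL': True}}
print(deserialize_item(item))  # {'id': 'abc', 'count': Decimal('3'), 'active': True}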
|
451d97ed656982b5b8df4fb2178051560cb5d8bd
| 3,647,621
|
def good_result(path_value, pred, source=None, target_path=''):
"""Constructs a JsonFoundValueResult where pred returns value as valid."""
source = path_value.value if source is None else source
return jp.PathValueResult(pred=pred, source=source, target_path=target_path,
path_value=path_value, valid=True)
|
2cfeab7df8b52d64cabad973bffeb1723d9e3215
| 3,647,622
|
def bot_properties(bot_id):
"""
Return all available properties for the given bot. The bot id should be
available in the `app.config` dictionary.
"""
bot_config = app.config['BOTS'][bot_id]
return [pd[0] for pd in bot_config['properties']]
|
a7922173d31fbb0d6b20ef1112cef6f88fe4749a
| 3,647,623
|
def find_path(ph_tok_list, dep_parse, link_anchor, ans_anchor, edge_dict, ph_dict):
"""
    :param ph_tok_list: token list of the phrase (used for non-placeholder nodes)
    :param dep_parse: dependency graph
    :param link_anchor: token index of the focus word (0-based)
    :param ans_anchor: token index of the answer (0-based)
    :param edge_dict: <head-dep, rel> dict
    :param ph_dict: <token_idx, ph> dict
    :return: token/edge list along the shortest dependency path from answer to focus
"""
if ans_anchor != link_anchor:
edges = []
for head, rel, dep in triples(dep_parse=dep_parse):
edges.append((head, dep))
graph = nx.Graph(edges)
path_nodes = nx.shortest_path(graph, source=ans_anchor+1, target=link_anchor+1) #[0, 1, 2, 3, 4]
else:
path_nodes = [link_anchor]
path_tok_list = []
path_len = len(path_nodes)
if path_len > 0:
for position in range(path_len-1):
edge = edge_dict['%d-%d' % (path_nodes[position], path_nodes[position+1])]
cur_token_idx = path_nodes[position] - 1
if cur_token_idx in ph_dict:
path_tok_list.append(ph_dict[cur_token_idx])
else:
path_tok_list.append(ph_tok_list[cur_token_idx])
path_tok_list.append(edge)
if link_anchor in ph_dict:
path_tok_list.append(ph_dict[link_anchor])
else:
path_tok_list.append('<E>')
return path_tok_list
|
51b621f1f1cdffd645b1528884603a383abf12a5
| 3,647,624
|
import logging
def download_video_url(
video_url: str,
pipeline: PipelineContext,
destination="%(title)s.%(ext)s",
progress=ProgressMonitor.NULL,
):
"""Download a single video from the ."""
config = pipeline.config
logger = logging.getLogger(__name__)
logger.info("Starting video download from URL: %s", video_url)
# Setup progress-tracking
progress.scale(total_work=1.0)
progress_tracker = YDLProgressTracker(show_progress_bar=True)
# Resolve destination path template
output_template = complete_template(config.sources.root, destination)
logger.info("Output template: %s", output_template)
ydl_opts = {
"format": "mp4",
"logger": YDLLogger(logger),
"progress_hooks": [progress_tracker.hook],
"outtmpl": output_template,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# Determine destination file name
video_info = ydl.extract_info(video_url, download=False)
file_name = ydl.prepare_filename(video_info)
logger.info("Downloading file: %s", file_name)
# Download file
with progress_tracker.track(progress):
ydl.download([video_url])
progress.complete()
return file_name
|
f3546d929fa6c976479fe86b945bb87279a22341
| 3,647,626
|
def get_block_name(source):
"""Get block name version from source."""
url_parts = urlparse(source)
file_name = url_parts.path
extension = file_name.split(".")[-1]
new_path = file_name.replace("." + extension, "_block." + extension)
new_file_name = urlunparse(
(
url_parts.scheme,
url_parts.netloc,
new_path,
url_parts.params,
url_parts.query,
url_parts.fragment,
)
)
return new_file_name
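# Illustrative usage: "_block" is inserted before the file extension while
# the rest of the URL is preserved.
print(get_block_name("https://example.com/media/video.mp4"))
# -> https://example.com/media/video_block.mp4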
|
ae2792a4c56baaa9045ed49961ad1c5029191d3d
| 3,647,627
|
import symbol
import token
def int_to_symbol(i):
""" Convert numeric symbol or token to a desriptive name.
"""
try:
return symbol.sym_name[i]
except KeyError:
return token.tok_name[i]
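# Illustrative usage with the stdlib grammar tables; note the `symbol`
# module is deprecated since Python 3.9 and removed in 3.10.
print(int_to_symbol(token.NAME))  # 'NAME' (token numbers fall through to tok_name)
print(int_to_symbol(256))         # first symbol entry, e.g. 'single_input'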
|
6f939d359dd92961f199dfd412dced3ecaef3a60
| 3,647,628
|
def cranimp(i, s, m, N):
"""
Calculates the result of c_i,s^dag a_s acting on an integer m. Returns the new basis state and the fermionic prefactor.
Spin: UP - s=0, DOWN - s=1.
"""
    offi = 2 * (N - i) - 1 - s
    offimp = 2 * (N + 1) - 1 - s
    m1 = flipBit(m, offimp)
    if m1 < m:
        m2 = flipBit(m1, offi)
        if m2 > m1:
            prefactor = prefactor_offset(m1, offi, N)
            prefactor *= prefactor_offset_imp(m, s, N)
            return prefactor, m2
    return 0, 0
|
aa64f6f5e9d0e596a801d854baf4e222e2f2192e
| 3,647,630
|
from random import randint
def _can_beeify():
    """ Determines if the random chance to beeify has occurred (1 in 13, since randint is inclusive) """
    return randint(0, 12) == 0
|
c79a116a6d1529d69f88c35a1264735d475b26d4
| 3,647,631
|
def get_object_classes(db):
    """Return a list of all object classes."""
    # avoid shadowing the builtin `list`
    return [item for item in classinfo]
|
e95676f19f3bf042a5f531d708f2e12a0ab3813f
| 3,647,632
|
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
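# Illustrative usage: a 1x2 grid of axes for side-by-side plots.
import matplotlib.pyplot as plt

ax = get_ax(rows=1, cols=2, size=6)
ax[0].plot([0, 1], [0, 1])
ax[1].plot([0, 1], [1, 0])
plt.show()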
|
0a79458ad335856198d5208071581685cd7c34a0
| 3,647,634
|
def spin_coherent(j, theta, phi, type='ket'):
"""Generates the spin state |j, m>, i.e. the eigenstate
of the spin-j Sz operator with eigenvalue m.
Parameters
----------
j : float
The spin of the state.
theta : float
Angle from z axis.
phi : float
Angle from x axis.
type : string {'ket', 'bra', 'dm'}
Type of state to generate.
Returns
-------
state : qobj
Qobj quantum object for spin coherent state
"""
Sp = jmat(j, '+')
Sm = jmat(j, '-')
psi = (0.5 * theta * np.exp(1j * phi) * Sm -
0.5 * theta * np.exp(-1j * phi) * Sp).expm() * spin_state(j, j)
if type == 'ket':
return psi
elif type == 'bra':
return psi.dag()
elif type == 'dm':
return ket2dm(psi)
else:
raise ValueError("invalid value keyword argument 'type'")
|
e64d207aeb27a5cf2ccdb1dff13da52be294c903
| 3,647,636
|
from keras.layers import add
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
"""A VGG convolutional block with bilinear upsampling for decoding.
:param classes: Integer, number of classes
:param scale: Float, scale factor to the input feature, varing from 0 to 1
:param target_shape: 4D Tuples with targe_height, target_width as
the 2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
`channels_first`.
>>> from keras_fcn.blocks import vgg_upsampling
>>> feat1, feat2, feat3 = feat_pyramid[:3]
>>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
>>> scale=1, block_name='feat1')(feat1, None)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
>>> scale=1e-2, block_name='feat2')(feat2, y)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
>>> scale=1e-4, block_name='feat3')(feat3, y)
"""
def f(x, y):
score = Conv2D(filters=classes, kernel_size=(1, 1),
activation='linear',
padding='valid',
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name='score_{}'.format(block_name))(x)
if y is not None:
def scaling(xx, ss=1):
return xx * ss
scaled = Lambda(scaling, arguments={'ss': scale},
name='scale_{}'.format(block_name))(score)
score = add([y, scaled])
upscore = BilinearUpSampling2D(
target_shape=target_shape,
name='upscore_{}'.format(block_name))(score)
return upscore
return f
|
9c372520adc3185a8b61b57ed73cc303f47c8275
| 3,647,637
|
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
return metrics
|
c969b2aadf9b16b1c26755dc1db4f1f24faa2c11
| 3,647,639
|
def start_session(web_session=None):
"""Starts a SQL Editor Session
Args:
web_session (object): The web_session object this session will belong to
Returns:
A dict holding the result message
"""
new_session = SqleditorModuleSession(web_session)
result = Response.ok("New SQL Editor session created successfully.", {
"module_session_id": new_session.module_session_id
})
return result
|
596603e5bc1d21df95728b4797a64cb4ff78fa2a
| 3,647,640
|
async def error_middleware(request: Request, handler: t.Callable[[Request], t.Awaitable[Response]]) -> Response:
"""logs an exception and returns an error message to the client
"""
try:
return await handler(request)
except Exception as e:
logger.exception(e)
return json_response(text=str(e), status=HTTPInternalServerError.status_code)
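# Registration sketch assuming aiohttp (suggested by Request/Response and
# HTTPInternalServerError); current aiohttp also expects the @web.middleware
# decorator on error_middleware for this (request, handler) signature.
from aiohttp import web

async def ping(request):
    return web.json_response({'ok': True})

app = web.Application(middlewares=[error_middleware])
app.router.add_get('/', ping)
web.run_app(app, port=8080)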
|
28748bd2018a0527ef740d8bed9c74983900e655
| 3,647,641
|
def init_mobility_accordion():
"""
Initialize the accordion for mobility tab.
Args: None
Returns:
mobility_accordion (object): dash html.Div that contains individual accordions
"""
accord_1 = init_accordion_element(
title="Mobility Index",
id='id_mobility_index',
tab_n=4,
group_n=1
)
accord_2 = init_accordion_element(
title="Comming Soon!",
id='id_metro_accordion',
tab_n=4,
group_n=2
)
accord_3 = init_accordion_element(
title="Comming Soon!",
id='id_tram_accordion',
tab_n=4,
group_n=3
)
accord_4 = init_accordion_element(
title="Comming Soon!",
id='id_bikes_accordion',
tab_n=4,
group_n=4
)
mobility_accordion = [
accord_1,
accord_2,
accord_3,
accord_4
]
return assemble_accordion(mobility_accordion)
|
25c5475e8ea972d057d230526d8dcc82b27d8ee0
| 3,647,642
|
def per_image_whiten(X):
""" Subtracts the mean of each image in X and renormalizes them to unit norm.
"""
num_examples, height, width, depth = X.shape
X_flat = X.reshape((num_examples, -1))
X_mean = X_flat.mean(axis=1)
X_cent = X_flat - X_mean[:, None]
    X_norm = np.sqrt(np.sum(X_cent * X_cent, axis=1))
X_out = X_cent / X_norm[:, None]
X_out = X_out.reshape(X.shape)
return X_out
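# Quick check of the whitening invariants: each flattened image should be
# zero-mean with unit L2 norm after the transform.
import numpy as np

X = np.random.rand(4, 8, 8, 3)
flat = per_image_whiten(X).reshape(4, -1)
print(np.allclose(flat.mean(axis=1), 0))             # True
print(np.allclose(np.linalg.norm(flat, axis=1), 1))  # True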
|
f831860c3697e6eac637b2fb3e502570fa4f31af
| 3,647,643
|
def fill_defaults(data, vals) -> dict:
"""Fill defaults if source is not present"""
for val in vals:
_name = val['name']
_type = val['type'] if 'type' in val else 'str'
_source = val['source'] if 'source' in val else _name
if _type == 'str':
_default = val['default'] if 'default' in val else ''
if 'default_val' in val and val['default_val'] in val:
_default = val[val['default_val']]
if _name not in data:
data[_name] = from_entry([], _source, default=_default)
elif _type == 'bool':
_default = val['default'] if 'default' in val else False
_reverse = val['reverse'] if 'reverse' in val else False
if _name not in data:
data[_name] = from_entry_bool([], _source, default=_default, reverse=_reverse)
return data
|
aa5df5bca76f1eaa426bf4e416a540fb725eb730
| 3,647,644
|
def static_shuttle_between():
"""
Route endpoint to show real shuttle data within a certain time range at once.
Returns:
rendered website displaying all points at once.
Example:
http://127.0.0.1:5000/?start_time=2018-02-14%2015:40:00&end_time=2018-02-14%2016:02:00
"""
start_time = request.args.get('start_time', default="2018-02-14 13:00:00")
end_time = request.args.get('end_time', default="2018-02-14 17:00:00")
return render_to_static(start_time, end_time)
|
eea24bb0abe90fe7b708ff8a9c73c2795f07865a
| 3,647,645
|
def read_data(inargs, infiles, ref_cube=None):
"""Read data."""
clim_dict = {}
trend_dict = {}
for filenum, infile in enumerate(infiles):
cube = iris.load_cube(infile, gio.check_iris_var(inargs.var))
if ref_cube:
branch_time = None if inargs.branch_times[filenum] == 'default' else str(inargs.branch_times[filenum])
time_constraint = timeseries.get_control_time_constraint(cube, ref_cube, inargs.time, branch_time=branch_time)
cube = cube.extract(time_constraint)
iris.util.unify_time_units([ref_cube, cube])
cube.coord('time').units = ref_cube.coord('time').units
cube.replace_coord(ref_cube.coord('time'))
else:
time_constraint = gio.get_time_constraint(inargs.time)
cube = cube.extract(time_constraint)
#cube = uconv.convert_to_joules(cube)
if inargs.perlat:
grid_spacing = grids.get_grid_spacing(cube)
cube.data = cube.data / grid_spacing
trend_cube = calc_trend_cube(cube.copy())
clim_cube = cube.collapsed('time', iris.analysis.MEAN)
clim_cube.remove_coord('time')
model = cube.attributes['model_id']
realization = 'r' + str(cube.attributes['realization'])
physics = 'p' + str(cube.attributes['physics_version'])
key = (model, physics, realization)
trend_dict[key] = trend_cube
clim_dict[key] = clim_cube
experiment = cube.attributes['experiment_id']
experiment = 'historicalAA' if experiment == "historicalMisc" else experiment
trend_ylabel = get_ylabel(cube, 'trend', inargs)
clim_ylabel = get_ylabel(cube, 'climatology', inargs)
metadata_dict = {infile: cube.attributes['history']}
return cube, trend_dict, clim_dict, experiment, trend_ylabel, clim_ylabel, metadata_dict
|
a3ffc2172394fe5a44e8239152a3f7b7ee660559
| 3,647,646
|
import json
async def create_account(*, user):
"""
Open an account for a user
Save account details in json file
"""
with open("mainbank.json", "r") as f:
users = json.load(f)
if str(user.id) in users:
return False
    else:
        users[str(user.id)] = {"wallet": 0, "bank": 0}
        with open("mainbank.json", "w") as f:
            json.dump(users, f)
        return True  # report that the account was created
|
0e1aaccfd0c9cda6238ba8caa90e80979540f2e8
| 3,647,647
|
import ntpath
import genericpath
def commonpath(paths):
"""Given a sequence of path names, returns the longest common sub-path."""
if not paths:
raise ValueError('commonpath() arg is an empty sequence')
if isinstance(paths[0], bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
else:
sep = '\\'
altsep = '/'
curdir = '.'
try:
drivesplits = [ntpath.splitdrive(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, p in drivesplits]
try:
isabs, = set(p[:1] == sep for d, p in drivesplits)
except ValueError:
raise ValueError("Can't mix absolute and relative paths")
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
if len(set(d for d, p in drivesplits)) != 1:
raise ValueError("Paths don't have the same drive")
drive, path = ntpath.splitdrive(paths[0].replace(altsep, sep))
common = path.split(sep)
common = [c for c in common if c and c != curdir]
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
s1 = min(split_paths)
s2 = max(split_paths)
for i, c in enumerate(s1):
if c != s2[i]:
common = common[:i]
break
else:
common = common[:len(s1)]
prefix = drive + sep if isabs else drive
return prefix + sep.join(common)
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise
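# Illustrative usage (Windows path semantics, per ntpath):
print(commonpath(['C:\\Program Files\\App', 'C:\\Program Files\\App\\bin']))
# -> C:\Program Files\App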
|
a8ef082e2944138ea08d409e273d724fd5d489eb
| 3,647,648
|
from functools import reduce
from itertools import product
from operator import mul

import numpy as np

# dask-internal imports (assumed layout, matching dask/array/reshape.py;
# reshape_rechunk is defined alongside this function in that module)
from dask.array.core import Array
from dask.array.utils import meta_from_array
from dask.base import tokenize
from dask.core import flatten
from dask.highlevelgraph import HighLevelGraph
from dask.utils import M

from .slicing import sanitize_index
def reshape(x, shape):
""" Reshape array to new shape
This is a parallelized version of the ``np.reshape`` function with the
following limitations:
1. It assumes that the array is stored in `row-major order`_
2. It only allows for reshapings that collapse or merge dimensions like
``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``
.. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order
When communication is necessary this algorithm depends on the logic within
rechunk. It endeavors to keep chunk sizes roughly the same when possible.
See Also
--------
dask.array.rechunk
numpy.reshape
"""
# Sanitize inputs, look for -1 in shape
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
if len(known_sizes) < len(shape):
if len(known_sizes) - len(shape) > 1:
raise ValueError("can only specify one unknown dimension")
# Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x
# for this case only.
if len(shape) == 1 and x.ndim == 1:
return x
missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if np.isnan(sum(x.shape)):
raise ValueError("Array chunk size or shape is unknown. shape: %s", x.shape)
if reduce(mul, shape, 1) != x.size:
raise ValueError("total size of new array must be unchanged")
if x.shape == shape:
return x
meta = meta_from_array(x, len(shape))
name = "reshape-" + tokenize(x, shape)
if x.npartitions == 1:
key = next(flatten(x.__dask_keys__()))
dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}
chunks = tuple((d,) for d in shape)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, meta=meta)
# Logic for how to rechunk
inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
x2 = x.rechunk(inchunks)
# Construct graph
in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
shapes = list(product(*outchunks))
dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])
return Array(graph, name, outchunks, meta=meta)
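# Minimal sketch of the parallel reshape via the public dask.array API (the
# function above is the internal implementation behind it), reusing the
# (64,) -> (4, 4, 4) case from the docstring.
import dask.array as da

x = da.arange(64, chunks=16)
y = x.reshape((4, 4, 4))
print(y.compute().shape)  # (4, 4, 4)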
|
2e8ed79f95319e02cacf78ce790b6dc550ac4e29
| 3,647,649
|