content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def dummy_img(w, h, intensity=200):
    """Create a demo test image: a uint8 array of shape (h, w) filled
    with *intensity*."""
    canvas = np.zeros((int(h), int(w)), dtype=np.uint8)
    canvas += intensity
    return canvas
|
a416753d8aa8682aef2b344f7de416139c9ed33a
| 3,641,091
|
def getHandler(database):
    """Instantiate and return this plugin's handler.

    The handler is an ``Events`` instance bound to *database*, with the
    'insert' endpoint exposed publicly.
    """
    handler = Events(database, 'events', public_endpoint_extensions=['insert'])
    return handler
|
e42e69b379e2053437ac75fe3fe8fc81229579c1
| 3,641,092
|
import logging
def RegQueryValueEx(key, valueName=None):
    """Retrieve the type and data for the specified registry value.

    Parameters
        key        A handle to an open registry key. The key must have been
                   opened with the KEY_QUERY_VALUE access right.
        valueName  The name of the registry value (optional).

    Return Value
        On success, a RegistryValue object wrapping the value's data,
        built according to the value's registry data type.

    Raises
        RegistryBaseException on general failure, unless:
        - InvalidHandleException if the key is not open
        - AccessDeniedException if access is denied
        - KeyError if the value does not exist
    """
    try:
        # First call with no buffer: queries only the data type and the
        # required buffer length.
        (dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName)
        # Allocate a byte buffer of exactly the reported size.
        data = (dtypes.BYTE * dataLength.value)()
        # Second call fills the buffer with the value's actual data.
        (dataType, data, dataLength) = c_api.RegQueryValueExW(key=key, name=valueName,
                                                              data=data, dataLength=dataLength)
        # Wrap the raw bytes in the value class matching the registry type.
        return RegistryValueFactory().by_type(dataType)(data)
    except errors.WindowsError as exception:
        # Translates (and raises) the specific exceptions documented above
        # for well-known Win32 error codes; anything else falls through.
        errors.catch_and_raise_general_errors(exception)
        logging.exception(exception)
        raise errors.RegistryBaseException(exception.winerror, exception.strerror)
|
5cceedabfaef44040067424696d13eb8d0f15550
| 3,641,093
|
def read_molecules(filename):
    """Read a file into an OpenEye molecule (or list of molecules).

    Parameters
    ----------
    filename : str
        Name of the file to read (e.g. mol2, sdf).

    Returns
    -------
    openeye.oechem.OEMol, list of OEMol, or None
        A single molecule when the file holds exactly one, a list when it
        holds several, and None when it holds none.
    """
    stream = oechem.oemolistream(filename)
    # Copy each molecule out of the stream; the stream reuses its buffer.
    molecules = [oechem.OEMol(mol) for mol in stream.GetOEMols()]
    stream.close()
    if not molecules:
        return None
    if len(molecules) == 1:
        return molecules[0]
    return molecules
|
420ba85dc768435927441500fe8005e3f009b9af
| 3,641,094
|
def euler(derivative):
    """Forward Euler integration step factory.

    Given dx/dt = derivative(t, x), return a stepper mapping
    (t, x, dt) -> (t + dt, x + derivative(t, x) * dt).
    """
    def step(t, x, dt):
        return t + dt, x + derivative(t, x) * dt
    return step
|
08d636ec711f4307ab32f9a8bc3672197a3699d9
| 3,641,095
|
def detect_encoding_type(input_geom):
    """
    Detect geometry encoding type:
    - ENC_WKB: b'\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
    - ENC_EWKB: b'\x01\x01\x00\x00 \xe6\x10\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
    - ENC_WKB_HEX: '0101000000000000000048934000000000009DB640'
    - ENC_EWKB_HEX: '0101000020E6100000000000000048934000000000009DB640'
    - ENC_WKB_BHEX: b'0101000000000000000048934000000000009DB640'
    - ENC_EWKB_BHEX: b'0101000020E6100000000000000048934000000000009DB640'
    - ENC_WKT: 'POINT (1234 5789)'
    - ENC_EWKT: 'SRID=4326;POINT (1234 5789)'

    Returns None when the input is neither a shapely geometry, a string,
    nor bytes, or when a string cannot be parsed as (E)WKT.
    """
    # Shapely objects need no further inspection.
    if isinstance(input_geom, shapely.geometry.base.BaseGeometry):
        return ENC_SHAPELY
    if isinstance(input_geom, str):
        if _is_hex(input_geom):
            return ENC_WKB_HEX
        srid, geom = _extract_srid(input_geom)
        if not geom:
            return None
        return ENC_EWKT if srid else ENC_WKT
    if isinstance(input_geom, bytes):
        # Bytes that decode as hex are binary-hex WKB; anything else is
        # treated as raw WKB.
        try:
            ba.unhexlify(input_geom)
        except Exception:
            return ENC_WKB
        return ENC_WKB_BHEX
    return None
|
4361abf695edad5912559175bd48cfa0bad92769
| 3,641,096
|
def unet_weights(input_size = (256,256,1), learning_rate = 1e-4, weight_decay = 5e-7):
    """Build and compile a weighted U-net.

    Parameters
    ----------
    input_size : tuple
        Shape of the input images and of the per-pixel weight maps.
        Default (256, 256, 1), i.e. single-channel 256x256 images.
    learning_rate : float
        Learning rate for the Adam optimizer. Default 1e-4.
    weight_decay : float
        Weight decay (Adam ``decay``) for the training. Default 5e-7.

    Returns
    -------
    Model
        Compiled model taking [image, weights] as inputs. The weight map is
        consumed only by the custom weighted binary cross-entropy loss.
    """
    def double_conv(filters, tensor):
        # Two stacked 3x3 ReLU convolutions — the basic U-net building block
        # repeated at every resolution level.
        out = Conv2D(filters, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(tensor)
        return Conv2D(filters, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(out)

    def up_block(filters, below, skip):
        # Decoder step: upsample, 2x2 convolve, concatenate with the encoder
        # skip connection, then apply the double convolution.
        up = Conv2D(filters, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(below))
        merged = concatenate([skip, up], axis = 3)
        return double_conv(filters, merged)

    input_img = Input(input_size)
    weights = Input(input_size)
    # Encoder (contracting path).
    conv1 = double_conv(64, input_img)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(128, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(256, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(512, pool3)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # Bottleneck.
    drop5 = Dropout(0.5)(double_conv(1024, pool4))
    # Decoder (expanding path) with skip connections from the encoder.
    conv6 = up_block(512, drop5, drop4)
    conv7 = up_block(256, conv6, conv3)
    conv8 = up_block(128, conv7, conv2)
    conv9 = up_block(64, conv8, conv1)
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # Final 1x1 convolution producing the per-pixel probability map.
    conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    # The weight map is an input so the loss closure can weight each pixel.
    model = Model(inputs = [input_img, weights], outputs = conv10)
    model.compile(optimizer = Adam(lr = learning_rate, decay = weight_decay), loss = binary_crossentropy_weighted(weights), metrics = ['accuracy'])
    return model
|
f091627475f13985e33f2960afd4b0136e9d10f4
| 3,641,098
|
from re import T
def std(x, axis=None, keepdims=False):
    """Standard deviation of a tensor, alongside the specified axis.

    Thin wrapper over the backend tensor module ``T`` — presumably
    theano.tensor or an equivalent Keras backend (TODO confirm: the
    visible ``from re import T`` cannot provide ``T.std``).
    """
    return T.std(x, axis=axis, keepdims=keepdims)
|
ad3c547d19507243ec143d38dd22f9fc92becffb
| 3,641,099
|
def validate_email_address(
    value=_undefined,
    allow_unnormalized=False,
    allow_smtputf8=True,
    required=True,
):
    """
    Check that a string represents a valid email address.

    Only email addresses in the fully normalized unicode form produced by
    `email_validator.validate_email` are accepted unless
    `allow_unnormalized` is set.  Validation logic comes from the well
    written and thoroughly researched
    [email-validator](https://pypi.org/project/email-validator/) library;
    despite the conflict with this library's naming convention, we
    recommend `email-validator` for validation and sanitisation of
    untrusted input.

    When called without `value`, returns a reusable validator callable
    configured with the remaining options.

    :param str value:
        The value to be validated.
    :param bool allow_unnormalized:
        Whether or not to accept addresses that are not completely
        normalized. Defaults to False, as in most cases you will want
        equivalent email addresses to compare equal.
    :param bool allow_smtputf8:
        Whether or not to accept email addresses with local parts that
        can't be encoded as plain ascii. Defaults to True, as such
        addresses are now common and very few current email servers do not
        support them.
    :param bool required:
        Whether the value can be `None`. Defaults to `True`.
    :raises TypeError:
        If the value is not a unicode string.
    :raises ValueError:
        If the value is not an email address, or is not normalized.
    """
    checker = _email_address_validator(
        allow_unnormalized=allow_unnormalized,
        allow_smtputf8=allow_smtputf8,
        required=required,
    )
    # Called without a value: act as a validator factory.
    if value is _undefined:
        return checker
    checker(value)
|
85b590186f2e147c6c19e91bace33f0301115d0e
| 3,641,100
|
import numpy
import math
def rotation_matrix_from_quaternion(quaternion):
    """Return a 4x4 homogeneous rotation matrix from a quaternion.

    Only the first four components are used; the component order implied
    by the formulas is (x, y, z, w). A zero quaternion yields identity.
    """
    q = numpy.array(quaternion, dtype=numpy.float64)[:4]
    norm_sq = numpy.dot(q, q)
    if norm_sq == 0.0:
        return numpy.identity(4, dtype=numpy.float64)
    # Scale so the outer product directly yields the 2*qi*qj/|q|^2 terms.
    q *= math.sqrt(2.0 / norm_sq)
    p = numpy.outer(q, q)
    matrix = numpy.identity(4, dtype=numpy.float64)
    matrix[0, 0] = 1.0 - p[1, 1] - p[2, 2]
    matrix[0, 1] = p[0, 1] - p[2, 3]
    matrix[0, 2] = p[0, 2] + p[1, 3]
    matrix[1, 0] = p[0, 1] + p[2, 3]
    matrix[1, 1] = 1.0 - p[0, 0] - p[2, 2]
    matrix[1, 2] = p[1, 2] - p[0, 3]
    matrix[2, 0] = p[0, 2] - p[1, 3]
    matrix[2, 1] = p[1, 2] + p[0, 3]
    matrix[2, 2] = 1.0 - p[0, 0] - p[1, 1]
    return matrix
|
51b169ffa702e3798f7a6138271b415b369566ba
| 3,641,101
|
def get_metadata(doi):
    """Extract additional metadata (title, year, journal) of a paper from its DOI.

    Requests a BibTeX record from dx.doi.org and parses the year, title and
    journal fields out of it. Fields that cannot be retrieved are returned
    as empty strings.
    """
    headers = {"accept": "application/x-bibtex"}
    title, year, journal = '', '', ''
    sessions = requests.Session()
    # Retry transient connection failures with exponential backoff.
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    sessions.mount('http://', adapter)
    sessions.mount('https://', adapter)
    try:
        # Bug fix: use the configured session (with retry adapters) instead
        # of the bare requests.get, which silently bypassed the retries.
        response = sessions.get("http://dx.doi.org/" + doi, headers=headers)
    except requests.exceptions.ConnectionError:
        print("ConnectionError")
        return title, year, journal
    if (response.status_code != 200):
        print('Did not find '+doi+' article, error code '+str(response.status_code))
    else:
        try:
            line = response.text.encode()
            # BibTeX fields are separated by "\n\t"; drop the entry header.
            line = line.split('\n\t')
            line = line[1:]
        except UnicodeEncodeError:
            print("UnicodeEncodeError")
            return title, year, journal
        for field in line:
            # Slice off the "key = {" prefix and trailing braces/commas.
            if len(field) >= 8 and field[0:6] == "year =":
                year = field[7:-1]
            if len(field) >= 9 and field[0:7] == "title =":
                title = field[9:-2]
            if len(field) >= 11 and field[0:9] == "journal =":
                journal = field[11:-3]
    return title, year, journal
|
8b7fee95ca247b0ffebfa704628be8a4659dd008
| 3,641,103
|
def format_channel(channel):
    """Return the canonical string representation of *channel*.

    Accepts None/'' (returns None), an integer index ('chN'), any string
    containing 'ch' (passed through), a single r/g/b letter, or a full
    color name 'red'/'green'/'blue' (case insensitive).
    Raises ValueError for anything else.
    """
    if channel is None or channel == '':
        return None
    if type(channel) is int:
        return 'ch{:d}'.format(channel)
    if type(channel) is not str:
        raise ValueError('Channel must be specified in string format.')
    if 'ch' in channel:
        return channel
    lowered = channel.lower()
    if lowered in 'rgb':
        return format_channel('rgb'.index(lowered))
    if lowered in ('red', 'green', 'blue'):
        return format_channel('rgb'.index(lowered[0]))
    raise ValueError('Channel string not recognized.')
|
4eeb42899762d334599df831b7520a956998155a
| 3,641,104
|
def download_emoji_texture(load=True):  # pragma: no cover
    """Download the emoji texture example.

    Parameters
    ----------
    load : bool, optional
        When ``True`` (default) the dataset is loaded after downloading
        and returned; when ``False`` only the filename is returned.

    Returns
    -------
    pyvista.Texture or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_emoji_texture()
    >>> dataset.plot(cpos="xy")
    """
    filename = 'emote.jpg'
    return _download_and_read(filename, texture=True, load=load)
|
3ff7805e6ab4f18d0064938f8863cbbe395a7c78
| 3,641,105
|
import random
def shuffle_sequence(sequence: str) -> str:
    """Return a random permutation of *sequence*.

    The result has exactly the same character composition as the input;
    characters are drawn one at a time from a shrinking pool (the same
    draw order as the original implementation, so seeded runs match).

    Args:
        sequence: input sequence to shuffle
    Returns:
        shuffled sequence
    """
    picked = []
    remaining = sequence
    while remaining:
        position = random.randrange(len(remaining))
        picked.append(remaining[position])
        remaining = remaining[:position] + remaining[position + 1:]
    return "".join(picked)
|
9e833aed9e5a17aeb419a77176713e76566d2d06
| 3,641,106
|
def bayesian_twosample(countsX, countsY, prior=None):
    """
    Calculate a Bayesian-like two-sample test between `countsX` and `countsY`.

    The idea is taken from [1]_. The counts are assumed IID and a Dirichlet
    prior is used to infer the underlying discrete distribution. Under the
    null hypothesis H_0 both count vectors come from one distribution; under
    H_1 they come from different distributions. The log Bayes factor

        \\chi = log P(X, Y | H_1) - log P(X, Y | H_0)

    is computed as the sum of the two independent evidences minus the
    evidence of the pooled counts; the null hypothesis is rejected when
    \\chi > 0.

    Parameters
    ----------
    countsX : array-like, shape (n,)
        The counts for X.
    countsY : array-like, shape (n,)
        The counts for Y.
    prior : array-like, shape (n,)
        Dirichlet hyper-parameters used during inference. If `None`,
        Jeffrey's prior (all 0.5) is used.

    Returns
    -------
    reject : bool
        `True` when the null hypothesis is rejected, i.e. the counts should
        be considered as generated from different distributions.
    chi : float
        The base-2 logarithm of the evidence ratio.

    Examples
    --------
    >>> bayesian_twosample([1,10], [2,3])
    (True, 0.11798407303051839)
    >>> bayesian_twosample([1,30], [20,30])
    (True, 9.4347501426274931)

    References
    ----------
    .. [1] Karsten M. Borgwardt and Zoubin Ghahramani, "Bayesian two-sample
       tests". http://arxiv.org/abs/0906.4032
    """
    if prior is None:
        # Jeffrey's prior for Dirichlet distributions.
        prior = 0.5 * np.ones(len(countsX))
    countsX = np.asarray(countsX)
    countsY = np.asarray(countsY)
    separate = log_evidence(countsX, prior) + log_evidence(countsY, prior)
    pooled = log_evidence(countsX + countsY, prior)
    chi = separate - pooled
    return chi > 0, chi
|
fc64ba0d6e64ffcd7d57d51b3ea0f4967d4e5096
| 3,641,107
|
import torch
def load_image(image_path):
    """Load the image at *image_path* as a 4-D float tensor.

    The image is passed through the module-level ``loader`` transform,
    given a leading batch dimension, and moved to the module-level
    ``device`` as torch.float.

    :param image_path: path of the image file to load
    :return: the loaded image tensor
    """
    tensor = loader(Image.open(image_path))
    return tensor.unsqueeze(0).to(device, torch.float)
|
064f9a4a71fdf9347a455269bf673bd2952dd9b2
| 3,641,108
|
import json
def feed_reader(url):
    """Fetch the feed at *url*, parse it, and return it serialized as JSON."""
    parsed = feed_parser(retrieve_feed(url))
    return json.dumps(parsed, ensure_ascii=False)
|
47726236afe429ab29720e917238960b17891938
| 3,641,109
|
def create_app(env_name):
    """Create and configure the Flask application.

    :param env_name: key into ``app_config`` selecting the configuration.
    :return: the configured Flask app with CORS, bcrypt, the database and
        the books blueprint registered, plus a sanity-check index route.
    """
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    # CORS registers itself on the app; keeping the (unused) return value
    # in a local served no purpose, so it has been dropped.
    CORS(app)
    # Initialize bcrypt and the database against this app instance.
    bcrypt.init_app(app)
    db.init_app(app)
    app.register_blueprint(book_blueprint, url_prefix='/api/v1/books')

    @app.route('/', methods=['GET'])
    def index():
        """Example endpoint."""
        return 'Congratulations! Your part 2 endpoint is working'

    return app
|
b694e8867b0bd3efefff0f7cae753b0ede91a36c
| 3,641,110
|
def get_shed_tool_conf_dict( app, shed_tool_conf ):
    """Return the in-memory version of the shed_tool_conf file.

    Matches *shed_tool_conf* against each registered conf's full
    ``config_filename`` first and its stripped file name second, returning
    ``(index, conf_dict)`` for the first hit. Falls off the loop (returning
    None) when nothing matches.
    """
    for index, conf_dict in enumerate( app.toolbox.shed_tool_confs ):
        config_filename = conf_dict[ 'config_filename' ]
        # Short-circuit: only strip the path when the exact match fails.
        if shed_tool_conf == config_filename or shed_tool_conf == strip_path( config_filename ):
            return index, conf_dict
|
21eae3a037498758425b29f10110f8e4e8ad24ff
| 3,641,111
|
def oio_make_subrequest(env, method=None, path=None, body=None, headers=None,
                        agent='Swift', swift_source=None,
                        make_env=oio_make_env):
    """
    Same as swift's make_subrequest, but lets some more headers pass
    through by defaulting ``make_env`` to ``oio_make_env``.
    """
    return orig_make_subrequest(
        env,
        method=method,
        path=path,
        body=body,
        headers=headers,
        agent=agent,
        swift_source=swift_source,
        make_env=make_env,
    )
|
f454b027243f2e72b9e59c50dfc3819a22b887d6
| 3,641,114
|
from typing import Optional
def get_channel(id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetChannelResult:
    """
    Resource schema for AWS::MediaPackage::Channel
    :param str id: The ID of the Channel.
    """
    __args__ = {'id': id}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the native provider; .value unwraps the result.
    result = pulumi.runtime.invoke('aws-native:mediapackage:getChannel', __args__, opts=opts, typ=GetChannelResult).value
    return AwaitableGetChannelResult(
        arn=result.arn,
        description=result.description,
        egress_access_logs=result.egress_access_logs,
        hls_ingest=result.hls_ingest,
        ingress_access_logs=result.ingress_access_logs)
|
28ca816326203ca37cf514dade19ce9b6205144d
| 3,641,116
|
import torch
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return torch.nn.functional.relu
if activation == "gelu":
return torch.nn.functional.gelu
if activation == "glu":
return torch.nn.functional.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
ecc690e9b9ec6148b6ea8df4bd08ff2d0c1c322e
| 3,641,117
|
def make_05dd():
    """End of movement lock (end of event); emits no payload."""
    return ""
|
bb85e01a4a4515ac88688690cacd67e7c9351034
| 3,641,118
|
import json
def lambda_handler(request, context):
    """Main Lambda handler for Smart Home v3 directives.

    Routes "Discover" directives to the discovery handler and all other
    directives to the non-discovery handler, logging the incoming directive
    and outgoing response as pretty-printed JSON. ValueError is logged and
    re-raised so Lambda records the failure.
    """
    try:
        logger.info("Directive:")
        logger.info(json.dumps(request, indent=4, sort_keys=True))
        logger.info("Received v3 directive!")
        if request["directive"]["header"]["name"] == "Discover":
            response = handle_discovery_v3(request)
        else:
            response = handle_non_discovery_v3(request)
        logger.info("Response:")
        logger.info(json.dumps(response, indent=4, sort_keys=True))
        # Removed a stale commented-out validation block that referenced an
        # undefined `version` variable.
        return response
    except ValueError as error:
        logger.error(error)
        raise
|
8eddd8ac2a47ecbc9c15f2644da6a2c7c6575371
| 3,641,119
|
from re import T
def batch_flatten(x):
    """Turn a n-D tensor into a 2D tensor where
    the first dimension is conserved.

    Reshapes ``x`` to (batch, prod(remaining dims)) via the backend tensor
    module ``T`` (presumably theano.tensor — TODO confirm; the visible
    ``from re import T`` cannot provide ``T.reshape``), and propagates
    Keras's static shape metadata when present.
    """
    y = T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
    if hasattr(x, '_keras_shape'):
        # If any non-batch dimension is unknown, the flattened size is
        # unknown too; otherwise it can be computed statically.
        if None in x._keras_shape[1:]:
            y._keras_shape = (x._keras_shape[0], None)
        else:
            y._keras_shape = (x._keras_shape[0], np.prod(x._keras_shape[1:]))
    return y
|
6543f3056d9e08b382cbd9e0ac1df6df84c59716
| 3,641,120
|
import typing
def msg_constant_to_behaviour_type(value: int) -> typing.Any:
    """
    Convert one of the behaviour type constants in a
    :class:`py_trees_ros_interfaces.msg.Behaviour` message to
    a type.

    Args:
        value: see the message definition for details

    Returns:
        a behaviour class type (e.g. :class:`py_trees.composites.Sequence`)

    Raises:
        TypeError: if the message type is unrecognised
    """
    # Constant -> class dispatch table instead of an if/elif ladder.
    lookup = {
        py_trees_ros_interfaces.msg.Behaviour.SEQUENCE: py_trees.composites.Sequence,
        py_trees_ros_interfaces.msg.Behaviour.SELECTOR: py_trees.composites.Selector,
        py_trees_ros_interfaces.msg.Behaviour.PARALLEL: py_trees.composites.Parallel,
        py_trees_ros_interfaces.msg.Behaviour.DECORATOR: py_trees.decorators.Decorator,
        py_trees_ros_interfaces.msg.Behaviour.BEHAVIOUR: py_trees.behaviour.Behaviour,
    }
    if value in lookup:
        return lookup[value]
    raise TypeError("invalid type specified in message [{}]".format(value))
|
bdb2342ad9abbbc6db51beebcb82313799bc110e
| 3,641,121
|
def calc_skewness(sig):
    """Compute the sample skewness of the input.

    Parameters
    ----------
    sig : ndarray
        Input from which skewness is computed.

    Returns
    -------
    float
        Skewness result.
    """
    skewness = skew(sig)
    return skewness
|
cad86d0a358a9bfe1891411522a6281e1eb291ed
| 3,641,122
|
import pandas as pd
import pkgutil
def check_is_pandas_dataframe(log):
    """
    Checks if a log object is a dataframe

    Parameters
    -------------
    log
        Log object

    Returns
    -------------
    boolean
        Is dataframe?
    """
    import importlib.util
    # pkgutil.find_loader is deprecated since Python 3.12;
    # importlib.util.find_spec is the supported availability check.
    if importlib.util.find_spec("pandas") is not None:
        # Deliberately an exact type check: DataFrame subclasses are not
        # accepted (preserves original behavior).
        return type(log) is pd.DataFrame
    return False
|
93fa02445302cf695fd86beb3e4836d58660e376
| 3,641,124
|
def mk_creoson_post_sessionId(monkeypatch):
    """Monkeypatch Client._creoson_post to return a fixed session id."""
    def stub(client, command, function, data=None, key_data=None):
        # Every call yields the same fake session id.
        return "123456"
    monkeypatch.setattr(
        creopyson.connection.Client, '_creoson_post', stub)
|
c8f5fcec40d55e14a7c955a90f32eccf597179f3
| 3,641,125
|
def isUsernameFree(name):
    """Check whether the username *name* is free for use.

    Note (preserved behavior): the comparison against the module-level
    ``username`` only happens while iterating ``username_array``, so with
    no entries registered every name is reported free.
    """
    global username_array
    global username
    for conn in username_array:
        if name in (username_array[conn], username):
            return False
    return True
|
6a30766e35228e1ebea47c7f6d4f7f4f2832572d
| 3,641,126
|
def get_quadrangle_dimensions(vertices):
    """
    :param vertices:
        A 3D numpy array which contains the coordinates of a quadrangle:
        D---C
        |   |
        A---B
        [ [[Dx, Dy]], [[Cx, Cy]], [[Bx, By]], [[Ax, Ay]] ].
    :return:
        width, height (which are integers)
    """
    # Collapse the (4, 1, 2) input into a (4, 2) integer corner array.
    corners = np.zeros((4, 2), dtype=int)
    for row in range(4):
        corners[row] = vertices[row, 0]

    def edge_length(a, b):
        # Euclidean distance between two corners, truncated to int.
        dx = corners[a, 0] - corners[b, 0]
        dy = corners[a, 1] - corners[b, 1]
        return int((dx ** 2 + dy ** 2) ** 0.5)

    # Width from edges D-C and C-B, height from B-A and A-D
    # (original edge pairing preserved).
    width = max(edge_length(0, 1), edge_length(1, 2))
    height = max(edge_length(2, 3), edge_length(3, 0))
    return width, height
|
25e74cf63237d616c1fd0b1b3428dd0763a27fba
| 3,641,127
|
def optimal_r(points, range_min, range_max):
    """
    Compute the optimal Vietoris-Rips parameter r for the given points:
    the smallest r (scanned in 100 steps over [range_min, range_max]) whose
    VR complex has exactly one connected component.

    :param points: list of tuples
    :return: the chosen r, the last VR complex built, and the list of
        (r, n_components) tuples examined along the way
    """
    step = (range_max - range_min) / 100
    history = []
    complex_vr = defaultdict(list)
    radius = range_min
    while radius < range_max:
        complex_vr = vietoris(points, radius)
        components = findComponents([s[0] for s in complex_vr[0]], complex_vr[1])
        print("\rr=", radius, "components=", len(components), end="")
        history.append((radius, len(components)))
        if len(components) == 1:
            # Smallest radius yielding a single component: done.
            print("\rDone, r=", radius, "n components=", len(components))
            return radius, complex_vr, history
        radius += step
    # No radius in range produced one component; return the last complex.
    print("\rNo ideal r found, returning the last one")
    return radius, complex_vr, history
|
950c8cc40de9ff9fbb0bd142461fc2710b54ead9
| 3,641,128
|
from pathlib import Path
def get_root_path():
    """Return the root path of the code: the grandparent directory of
    this file, as a string."""
    return str(Path(__file__).parents[1])
|
bc01b4fb15569098286fc24379a258300ff2dfa0
| 3,641,129
|
def _handle_requirements(hass: core.HomeAssistant, component,
                         name: str) -> bool:
    """Install a component's REQUIREMENTS; return False on any failure.

    Reported as success immediately when pip installs are disabled or the
    component declares no requirements.
    """
    if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
        return True
    for requirement in component.REQUIREMENTS:
        if pkg_util.install_package(requirement,
                                    target=hass.config.path('deps')):
            continue
        _LOGGER.error('Not initializing %s because could not install '
                      'dependency %s', name, requirement)
        return False
    return True
|
6608228271fe37d607e7980d6c4cfe876b45b853
| 3,641,130
|
def check_image(filename):
    """Return True if *filename* can be opened and verified as an image.

    Uses a context manager so the underlying file handle is always closed
    (the previous open-without-close leaked a handle per call).
    """
    try:
        with Image.open(filename) as im:
            im.verify()  # integrity check without decoding pixel data
        return True
    except OSError:
        return False
|
b2854195c83cc6ab20e967e25f3892a46d3c146f
| 3,641,131
|
import torch
def encode_save(sig=None, name='simpleModel_ini', dir_path="../src/vne/models"):
    """Create a simpleModel encoder, smoke-test it, and save it to disk.

    Parameters
    ----------
    sig:
        a numpy array with shape (nsamples, nfeatures, nsensors). In general
        a single neural signal may contain multiple channels; the vne maps
        all the different channels to a single scalar encoded signal.
        Typically shaped (1, 1, S) where S is the number of sensors.
        If None (the default) a fresh random signal of shape
        (1, nfeat, nsensors) is generated per call. (The previous default
        evaluated np.random.random at import time, so every call silently
        shared one array — the mutable default argument pitfall.)
    name:
        string with the filename to save the model under
    dir_path:
        the local directory to save the model

    Returns
    --------
    model:
        A copy of the encoder model generated by the function
    """
    if sig is None:
        sig = np.random.random([1, nfeat, nsensors])
    model = simpleModel().eval()
    model.apply(init_weights_simple)
    sig = torch.tensor(sig.astype(np.float32)).to('cpu')
    enc = model(sig)
    print("signal={}".format(sig))
    print("encoded={}".format(enc))
    # NOTE(review): the weights are re-initialized (vne.init_weights_simple)
    # after the smoke test and before saving — confirm this is intentional.
    model.apply(vne.init_weights_simple)
    save_model(encoder=model, name=name, dir_path=dir_path)
    return model
|
96fbeb853c66afd7f4a6c3a1a087faee64ec76d0
| 3,641,132
|
from typing import Set
from typing import Tuple
from typing import Counter
def _imports_to_canonical_import(
    split_imports: Set[Tuple[str, ...]],
    parent_prefix=(),
) -> Tuple[str, ...]:
    """Extract the canonical import name from a list of imports.

    We have three rules.
    1. If you have at least 4 imports and they follow a structure like
       'a', 'a.b', 'a.b.c', 'a.b.d'
       this is treated as a namespace package with a canonical import of `a.b`
    2. If you have fewer imports but they have a prefix that is found in
       KNOWN_NAMESPACE_PACKAGES
       you are also treated as a namespace package
    3. Otherwise return the commonprefix
    """
    # Longest leading segment sequence shared by every import.
    prefix: Tuple[str, ...] = commonprefix(list(split_imports))
    # Histogram of import lengths; c.get(1) == 1 means there is exactly one
    # bare top-level import (the 'a' in rule 1).
    c = Counter(len(imp) for imp in split_imports)
    if (
        len(prefix) == 1
        and c.get(1) == 1
        and (
            (len(split_imports) > 3)
            or (parent_prefix + prefix in KNOWN_NAMESPACE_PACKAGES)
        )
    ):
        # Looks like a namespace package: strip the shared first segment and
        # recurse on the remainders to find the canonical sub-prefix.
        ns_prefix = _imports_to_canonical_import(
            split_imports={imp[1:] for imp in split_imports if len(imp) > 1},
            parent_prefix=parent_prefix + prefix,
        )
        if prefix and ns_prefix:
            return prefix + ns_prefix
    return prefix
|
e9cfd11b5837576ccc6f380463fe4b8ce9f4a63d
| 3,641,133
|
def score_hmm_logprob(bst, hmm, normalize=False):
    """Score events in a BinnedSpikeTrainArray by their log probability
    under the model.

    Parameters
    ----------
    bst : BinnedSpikeTrainArray
    hmm : PoissonHMM
    normalize : bool, optional. Default is False.
        When True, each log probability is divided by its sequence length.

    Returns
    -------
    logprob : array of size (n_events,)
        One log probability per event in bst.
    """
    scores = np.atleast_1d(hmm.score(bst))
    if not normalize:
        return scores
    return np.atleast_1d(scores) / bst.lengths
|
d20e5e75c875602c7ac2e3b0dadc5adcae45406d
| 3,641,135
|
def rotate_2d_list(squares_list):
    """Rotate a two-dimensional list 90 degrees clockwise.

    Returns a list of tuples (one per rotated row). Technique from
    http://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python
    """
    return list(zip(*squares_list[::-1]))
|
a6345ce6954643b8968ffb4c978395baf777fca4
| 3,641,136
|
def next_ticket(ticket):
    """Return the next ticket in the chain for the given ticket.

    Args:
        ticket (Ticket): an arbitrary ticket

    Returns:
        a Ticket with the next pseudorandom ticket number, the same
        ticket id, and a generation number one larger.
    """
    successor_number = next_fraction(ticket.ticket_number)
    return Ticket(successor_number, ticket.id, ticket.generation + 1)
|
7f5cfbd9992322e394a9e3f4316d654ef9ad6749
| 3,641,137
|
def icecreamParlor4(m, arr):
    """Return the 1-based indices of two distinct items in *arr* whose
    prices sum to *m*, or None when no such pair exists.

    Brute-force O(n^2) scan over every ordered pair of items — works, but
    only for small inputs.
    """
    # Pair each price with its index so the result can report positions.
    decorated_arr = list(enumerate(arr))
    for i_index, i_price in decorated_arr:
        for j_index, j_price in decorated_arr:
            # Bug fix: the original compared i[0] + j[1] (a price plus an
            # index) instead of summing the two prices.
            if i_price + j_price == m and i_index != j_index:
                return [i_index + 1, j_index + 1]
|
6af2da037aa3e40c650ac48ebeb931399f1a6eaa
| 3,641,138
|
import datasets
def get_data(filepath, transform=None, rgb=False):
    """
    Read in data from the given folder.
    Parameters
    ----------
    filepath: str
        Path for the file e.g.: F'string/containing/filepath'
    transform: callable
        A function which transforms the data to the required format
    rgb: bool
        Image type for different channel types
    Returns
    -------
    torchvision.datasets.folder.ImageFolder
        Required data after read in
    """
    # Default to a bare to-tensor transform when none is supplied.
    if transform is None:
        transform = transforms.Compose([transforms.ToTensor()])
    # NOTE(review): both branches below are currently identical, so the
    # ``rgb`` flag has no effect — confirm whether the rgb branch was meant
    # to add a channel-conversion transform.
    if rgb:
        # will read the data into 3 channels,
        # majority of images are 1 channel only however
        xray_data = datasets.ImageFolder(root=filepath,
                                         transform=transform)
    else:
        # read in all images as one channel
        xray_data = datasets.ImageFolder(root=filepath, transform=transform)
    return xray_data
|
99eb506c9033e259ad5769d8eff10b7fd985a1d6
| 3,641,139
|
def read_input_files(input_file: str) -> frozenset[IntTuple]:
    """
    Extracts an initial pocket dimension: the set of active cube
    coordinates read from the input grid.

    Each '#' character marks an active cube at (x, y) = (column, row).
    Note the returned tuples are 2-D, not 3-D — presumably the caller
    embeds this initial slice into a higher-dimensional pocket; confirm
    against the caller.
    """
    with open(input_file) as input_fobj:
        pocket = frozenset(
            (x, y)
            for y, line in enumerate(input_fobj)
            for x, char in enumerate(line.strip())
            if char == '#'
        )
    return pocket
|
c44e85e0c4f998e04b3d5e8b1e4dc30260102fce
| 3,641,141
|
def resnet101(pretrained=False, root='./pretrain_models', **kwargs):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): If True, load weights pre-trained on ImageNet.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state)
    return net
|
ba77d75d52aa5ce92cc5a8bec09347c799ebc02a
| 3,641,142
|
def insert_left_side(left_side, board_string):
    """
    Replace the left side of the Sudoku board 'board_string' with
    'left_side', i.e. overwrite the first cell of each of the 9 rows.
    """
    # inputs should match in upper left corner
    assert(left_side[0] == board_string[0])
    # inputs should match in lower left corner
    assert(left_side[8] == low_left_digit(board_string))
    board = list(board_string)
    for row in range(9):
        # The row-th row starts at offset row*9 in the flat board string.
        board[row * 9] = left_side[row]
    return "".join(board)
|
da607155e5c82a597f1ec62e6def3fbe31119c36
| 3,641,143
|
import tqdm
import uuid
def collect_story_predictions(story_file, policy_model_path, nlu_model_path,
                              max_stories=None, shuffle_stories=True):
    """Test the stories from a file, running them through the stored model.

    Replays each story's dialogue through the loaded agent and collects,
    per user utterance, the actions the policy predicted versus the actions
    the story actually contains. Returns two parallel lists
    (actual, preds) of action names, padded with "None" where one side has
    more actions than the other.
    """
    def actions_since_last_utterance(tracker):
        # Walk tracker events backwards, collecting actions executed after
        # the most recent user utterance (returned in forward order).
        actions = []
        for e in reversed(tracker.events):
            if isinstance(e, UserUttered):
                break
            elif isinstance(e, ActionExecuted):
                actions.append(e.action_name)
        actions.reverse()
        return actions
    # Use the NLU model when given; otherwise fall back to the regex
    # interpreter (stories then contain literal intent annotations).
    if nlu_model_path is not None:
        interpreter = RasaNLUInterpreter(model_directory=nlu_model_path)
    else:
        interpreter = RegexInterpreter()
    agent = Agent.load(policy_model_path, interpreter=interpreter)
    stories = _get_stories(story_file, agent.domain,
                           max_stories=max_stories,
                           shuffle_stories=shuffle_stories)
    preds = []
    actual = []
    logger.info("Evaluating {} stories\nProgress:".format(len(stories)))
    for s in tqdm(stories):
        # Fresh sender id per story so trackers don't bleed into each other.
        sender = "default-" + uuid.uuid4().hex
        dialogue = s.as_dialogue(sender, agent.domain)
        actions_between_utterances = []
        last_prediction = []
        # Skip the first event (dialogue start) and replay the rest.
        for i, event in enumerate(dialogue.events[1:]):
            if isinstance(event, UserUttered):
                # Align the previous prediction against the actions the
                # story actually contained since the last utterance.
                p, a = _min_list_distance(last_prediction,
                                          actions_between_utterances)
                preds.extend(p)
                actual.extend(a)
                actions_between_utterances = []
                agent.handle_message(event.text, sender=sender)
                tracker = agent.tracker_store.retrieve(sender)
                last_prediction = actions_since_last_utterance(tracker)
            elif isinstance(event, ActionExecuted):
                actions_between_utterances.append(event.action_name)
        if last_prediction:
            # Flush the trailing prediction, padding the shorter list with
            # "None" so preds and actual stay the same length.
            preds.extend(last_prediction)
            preds_padding = len(actions_between_utterances) - \
                len(last_prediction)
            preds.extend(["None"] * preds_padding)
            actual.extend(actions_between_utterances)
            actual_padding = len(last_prediction) - \
                len(actions_between_utterances)
            actual.extend(["None"] * actual_padding)
    return actual, preds
|
d47d83c62d119f99c5c3c74b490866ffc64dc3ed
| 3,641,144
|
import select
def sniff(store=False, prn=None, lfilter=None,
          count=0,
          stop_event=None, refresh=.1, *args, **kwargs):
    """Sniff packets until stopped.

    sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args)

    store: whether to store sniffed packets or discard them
    prn: function to apply to each packet. If something is returned,
         it is displayed. Ex: prn = lambda x: x.summary()
    lfilter: python function applied to each packet to determine
             if further action may be done.
             Ex: lfilter = lambda x: x.haslayer(Padding)
    count: stop after this many accepted packets (0 = no limit)
    stop_event: Event that stops the function when set
    refresh: check stop_event.is_set() every refresh seconds

    Returns:
        plist.PacketList of the stored packets.
    """
    s = conf.L2listen(type=ETH_P_ALL, *args, **kwargs)
    n = 0
    lst = []
    try:
        while True:
            if stop_event and stop_event.is_set():
                break
            # Bug fix: with `import select` in scope, the bare call
            # `select([s], ...)` invoked the module object (TypeError).
            # The function lives at select.select().
            sel = select.select([s], [], [], refresh)
            if s in sel[0]:
                p = s.recv(MTU)
                if p is None:
                    break
                if lfilter and not lfilter(p):
                    continue
                if store:
                    lst.append(p)
                if prn:
                    r = prn(p)
                    if r is not None:
                        print(r)
                n += 1
                if count and n == count:
                    break
    except KeyboardInterrupt:
        pass
    finally:
        s.close()
    return plist.PacketList(lst, "Sniffed")
|
06309d056c0a68046025b806d91b7c7f0c5fdeb6
| 3,641,146
|
def _brighten_images(images: np.ndarray, brightness: int = BRIGHTNESS) -> np.ndarray:
    """
    Return a brightness-adjusted copy of the input images.

    :params images: The original images of shape [H, W, D].
    :params brightness: Amount by which brightness is raised or lowered.
    :return: A deep copy of the images with each one brightened.
    """
    adjusted = deepcopy(images)
    for single_image in adjusted:
        # Each image of the copy is adjusted in place.
        _brighten_image(single_image, brightness)
    return adjusted
|
b2bc8c801e459b5f913a2bf725709cbc80df128a
| 3,641,148
|
def CreateMovie(job_name, input_parameter, view_plane, plot_type):
    """Encode a job's numbered PNG frames into an MP4 movie via ffmpeg.

    Builds the output name from the job/parameter/plane/plot names and
    shells out to ffmpeg to stitch '<param>_<plane>_<plot>_image_<i>.png'
    frames together.

    Returns:
        str: the generated movie file name.
    """
    movie_name = job_name + '_' + input_parameter + '_' + view_plane + '_' + plot_type + "_movie.mp4"
    # NOTE(review): the arguments are interpolated into a shell command;
    # callers must not pass untrusted strings here (shell-injection risk).
    cmd = "ffmpeg -qscale 1 -r 3 -b 3000k -i " + input_parameter + '_' + view_plane + '_' + plot_type + "_image_%01d.png " + movie_name
    # Bug fix: `print cmd` is a Python-2 statement and a syntax error on
    # Python 3; print() works on both.
    print(cmd)
    os.system(cmd)
    return movie_name
|
ff518f485db1de7a2d00af2984f5829cfe4279dc
| 3,641,150
|
def describe_recurrence(recur, recurrence_dict, connective="and"):
    """Create a textual description of the recur set.

    Arguments:
        recur (Set): recurrence pattern as set of day indices eg Set(["1","3"])
        recurrence_dict (Dict): map of strings to recurrence patterns
        connective (Str): word to connect list of days, default "and"

    Returns:
        Str: list of days as a human understandable string
    """
    days = " ".join(sorted(recur))
    # Accept the first name whose pattern matches the whole set exactly.
    for name, pattern in recurrence_dict.items():
        if pattern == days:
            return name
    # No single name covers the pattern: name each day individually and
    # join them, e.g. "Monday and Wednesday".
    day_names = []
    for day in days.split(" "):
        for name, pattern in recurrence_dict.items():
            if pattern == day:
                day_names.append(name)
                break
    return join_list(day_names, connective)
|
0189fa88f64a284367829c3f95ecd056aec7bfa8
| 3,641,151
|
def route_image_zoom_in():
    """
    Zoom the image viewer in one step and report the outcome as JSON.
    """
    return jsonify({'zoom-in': image_viewer.zoom_in()})
|
758acf681718cc1c2be2c3bac112f4fc83243cf9
| 3,641,152
|
def evaluate_flow_file(gt_file, pred_file):
    """
    Evaluate the estimated optical flow end point error against ground truth.

    :param gt_file: ground truth flow file path
    :param pred_file: estimated optical flow file path
    :return: average end point error, float32
    """
    ground_truth = read_flow(gt_file)
    predicted = read_flow(pred_file)
    # Compare the u (index 0) and v (index 1) components of both fields.
    return flow_error(ground_truth[:, :, 0], ground_truth[:, :, 1],
                      predicted[:, :, 0], predicted[:, :, 1])
|
ecbe960cd011f4282758a3f6bf4f345853f9a49d
| 3,641,153
|
def get_heavy_load_rses(threshold, session=None):
    """
    Retrieve RSEs whose number of in-use sources reaches the threshold.

    :param threshold: Threshold as an int.
    :param session: Database session to use.
    :returns: List of {'rse_id': ..., 'load': ...} dicts, or None when no
              sources are in use at all.
    """
    try:
        rows = session.query(models.Source.rse_id, func.count(models.Source.rse_id).label('load'))\
                      .filter(models.Source.is_using == true())\
                      .group_by(models.Source.rse_id)\
                      .all()
        if not rows:
            return
        return [{'rse_id': rse_id, 'load': load}
                for rse_id, load in rows
                if load >= threshold]
    except IntegrityError as error:
        raise RucioException(error.args)
|
52b44132c1680e0ca9ba64a0afb2938abcab1228
| 3,641,154
|
import operator
def facetcolumns(table, key, missing=None):
    """
    Like :func:`petl.util.materialise.columns` but stratified by values of the
    given key field. E.g.::

        >>> import petl as etl
        >>> table = [['foo', 'bar', 'baz'],
        ...          ['a', 1, True],
        ...          ['b', 2, True],
        ...          ['b', 3]]
        >>> fc = etl.facetcolumns(table, 'foo')
        >>> fc['a']
        {'foo': ['a'], 'bar': [1], 'baz': [True]}
        >>> fc['b']
        {'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}

    """
    facets = dict()
    rows = iter(table)
    hdr = next(rows)
    fields = list(map(text_type, hdr))
    key_indices = asindices(hdr, key)
    assert len(key_indices) > 0, 'no key field selected'
    getkey = operator.itemgetter(*key_indices)
    for row in rows:
        keyval = getkey(row)
        cols = facets.get(keyval)
        if cols is None:
            # First row for this key value: start one list per field.
            cols = dict((f, list()) for f in fields)
            facets[keyval] = cols
        # Short rows are padded with `missing`; extra cells are dropped.
        for f, v in izip_longest(fields, row, fillvalue=missing):
            if f in cols:
                cols[f].append(v)
    return facets
|
bec3419a22128c2863032022c5e9e507c9791f1b
| 3,641,155
|
def divide_set(vectors, labels, column, value):
    """
    Split (vectors, labels) into two subsets along one dimension and value.

    Each (vector, label) pair goes into the first subset when
    split_function(vector, column, value) is truthy, otherwise into the
    second subset.

    Args:
        vectors: sequence of feature vectors.
        labels: sequence of labels, parallel to `vectors`.
        column: index of the dimension to split on.
        value: split value, passed through to split_function.

    Returns:
        Tuple (vectors_set_1, label_set_1, vectors_set_2, label_set_2).
    """
    vectors_set_1, label_set_1 = [], []
    vectors_set_2, label_set_2 = [], []
    # Single pass: the previous implementation traversed the data twice and
    # evaluated split_function twice per element.
    for vector, label in zip(vectors, labels):
        if split_function(vector, column, value):
            vectors_set_1.append(vector)
            label_set_1.append(label)
        else:
            vectors_set_2.append(vector)
            label_set_2.append(label)
    return vectors_set_1, label_set_1, vectors_set_2, label_set_2
|
dd99c4d700ad8294ce2b2a33b18feb79165487fb
| 3,641,156
|
def execute_freezerc(dict, must_fail=False, merge_stderr=False):
    """
    Run the freezer client with the given options.

    :param dict: mapping of option name to value (note: the parameter name
        shadows the builtin ``dict``; kept for backward compatibility)
    :type dict: dict[str, str]
    :param must_fail: whether a non-zero exit status is expected
    :param merge_stderr: whether stderr is merged into stdout
    :return: whatever :func:`execute` returns
    """
    cmdline = [FREEZERC] + dict_to_args(dict)
    return execute(cmdline, must_fail=must_fail, merge_stderr=merge_stderr)
|
596490d1fe0ae90a807c6553674273e470165e57
| 3,641,157
|
def corField2D_vector(field):
    """
    2D correlation field of a vector field, computed per spatial component
    with Fast Fourier Transforms.

    Parameters
    ----------
    field : (n, n, 2) shaped array like
        Vector field to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Unnormalised correlation field.
        C[0, 0] is the origin, points are uniformly distributed.
    xCL : float
        Unnormalised longitudinal correlation of the first component at a
        distance of one grid spacing.
    yCL : float
        Unnormalised longitudinal correlation of the second component at a
        distance of one grid spacing.
    xCT : float
        Unnormalised transversal correlation of the first component at a
        distance of one grid spacing.
    yCT : float
        Unnormalised transversal correlation of the second component at a
        distance of one grid spacing.
    Norm : float
        Norm of correlation field.
    """
    # Correlate each spatial component separately, then combine.
    xC, xNorm = corField2D_scalar(field[:, :, 0])
    yC, yNorm = corField2D_scalar(field[:, :, 1])
    # Longitudinal: component along the displacement direction;
    # transversal: component perpendicular to it.
    xCL, yCL = xC[0, 1], yC[1, 0]
    xCT, yCT = xC[1, 0], yC[0, 1]
    return xC + yC, xCL, yCL, xCT, yCT, xNorm + yNorm
|
35b4c1dc646c30b9fdae7c1ad1095ae67ea1356d
| 3,641,158
|
import json
def load_json_fixture(filename: str):
    """Load stored JSON data from the test examples directory."""
    fixture_path = TEST_EXAMPLES_PATH / filename
    return json.loads(fixture_path.read_text())
|
1021975d2111a9391a93ff9bd757479a8c9663f6
| 3,641,159
|
from typing import List
from typing import Optional
def stat_list_card(
        box: str,
        title: str,
        items: List[StatListItem],
        name: Optional[str] = None,
        subtitle: Optional[str] = None,
        commands: Optional[List[Command]] = None,
) -> StatListCard:
    """Render a card displaying a list of stats.

    Args:
        box: A string indicating how to place this component on the page.
        title: The title.
        items: The individual stats to be displayed.
        name: An optional name for this item.
        subtitle: The subtitle, displayed below the title.
        commands: Contextual menu commands for this component.

    Returns:
        A `h2o_wave.types.StatListCard` instance.
    """
    # Keyword construction makes the argument mapping explicit.
    return StatListCard(
        box=box,
        title=title,
        items=items,
        name=name,
        subtitle=subtitle,
        commands=commands,
    )
|
fa9036442fdad3028d40918741e91ca5077925fb
| 3,641,160
|
def test_DataGeneratorAllSpectrums_fixed_set():
    """
    Test whether use_fixed_set=True toggles generating the same dataset on each epoch.

    A generator with use_fixed_set=False must yield different batches on
    consecutive passes, while one with use_fixed_set=True must replay the
    same batches every epoch — and two independently constructed fixed
    generators built from the same data must agree with each other.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df = create_test_data()
    # Define other parameters
    batch_size = 4
    dimension = 88
    # Create normal generator
    normal_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 dim=dimension, batch_size=batch_size,
                                                 use_fixed_set=False)
    # Create generator that generates a fixed set every epoch
    fixed_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                reference_scores_df=tanimoto_scores_df,
                                                dim=dimension, batch_size=batch_size,
                                                num_turns=5, use_fixed_set=True)
    def collect_results(generator):
        # Materialise one full epoch: pair inputs stacked along axis 2,
        # one column of X/y per batch.
        n_batches = len(generator)
        X = np.zeros((batch_size, dimension, 2, n_batches))
        y = np.zeros((batch_size, n_batches))
        for i, batch in enumerate(generator):
            X[:, :, 0, i] = batch[0][0]
            X[:, :, 1, i] = batch[0][1]
            y[:, i] = batch[1]
        return X, y
    # Two epochs of the normal generator must differ.
    first_X, first_y = collect_results(normal_generator)
    second_X, second_y = collect_results(normal_generator)
    assert not np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 2), "Expected different number of labels"
    # Two epochs of the fixed generator must be identical.
    first_X, first_y = collect_results(fixed_generator)
    second_X, second_y = collect_results(fixed_generator)
    assert np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 10), "Expected different number of labels"
    # Create another fixed generator based on the same dataset that should generate the same
    # fixed set
    fixed_generator2 = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 dim=dimension, batch_size=batch_size,
                                                 num_turns=5, use_fixed_set=True)
    first_X, first_y = collect_results(fixed_generator)
    second_X, second_y = collect_results(fixed_generator2)
    assert np.array_equal(first_X, second_X)
|
95d8a13f1eaa5d7ef93936a8f9e224a56119e6af
| 3,641,161
|
import math
def inverse_gamma(data, alpha=0.1, beta=0.1):
    """
    Probability density of the inverse-gamma distribution.

    f(x; alpha, beta) = beta**alpha / Gamma(alpha) * x**(-alpha - 1) * exp(-beta / x)

    :param data: point x (> 0) at which the density is evaluated
    :param alpha: shape parameter (> 0)
    :param beta: scale parameter (> 0)
    :return: density value at `data`
    """
    # Bug fix: the previous implementation used pow(alpha, data - 1) where
    # the inverse-gamma density requires the x**(-alpha - 1) term.
    return (pow(beta, alpha) / math.gamma(alpha)) *\
        pow(data, -alpha - 1) * math.exp(-beta / data)
|
c13f5e4a05e111ae0082b7e69ef5b31498d2c221
| 3,641,162
|
def query(queryid):
    """
    Dynamic Query View.

    Must be logged in to access this view, otherwise redirected to login page.
    A unique view is generated based off a query ID.
    A page is only returned if the query ID is associated with the logged in
    user (or the user is an admin); otherwise a 404 error page is returned.
    """
    query = models.SpellChecks.query.get(queryid)
    if query is not None and ((g.user.is_admin) or (g.user.username == query.username)):
        # (removed a stray no-op `query` expression statement here)
        render = make_response(render_template('spellcheck/history_s_query.html', query=query))
        # Defensive response headers.
        render.headers.set('Content-Security-Policy', "default-src 'self'")
        render.headers.set('X-Content-Type-Options', 'nosniff')
        render.headers.set('X-Frame-Options', 'SAMEORIGIN')
        render.headers.set('X-XSS-Protection', '1; mode=block')
        return render
    else:
        abort(404)
|
22c77dc122b15d8bb6589a823a8f3904f189371a
| 3,641,163
|
def get_ssid() -> str:
    """Gets SSID of the network connected.

    Returns:
        str:
            Wi-Fi or Ethernet SSID.
    """
    airport = '/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport'
    process = Popen([airport, '-I'], stdout=PIPE)
    out, err = process.communicate()
    if error := process.returncode:
        logger.error(f"Failed to fetch SSID with exit code: {error}\n{err}")
    # Parse "key: value" lines (last line dropped) into a dict; lines with
    # more or fewer than two whitespace-separated tokens are skipped.
    pairs = {}
    for info in out.decode('utf-8').splitlines()[:-1]:
        if len(info.split()) != 2:
            continue
        key, value = map(str.strip, info.split(': '))
        pairs[key] = value
    return pairs.get('SSID')
|
def6c485099219cd0894cde636ac93dc1b130a98
| 3,641,164
|
def train_predict(clf, X_train, X_test, y_train, y_test):
    """Train clf on <X_train, y_train>, predict on X_test; return y_pred.

    Args:
        clf: estimator exposing fit(X, y) and predict(X).
        X_train, y_train: training data.
        X_test: test features.
        y_test: unused; kept for interface compatibility.

    Returns:
        Predicted labels for X_test.
    """
    print("Training a {}...".format(clf.__class__.__name__))
    # Bug fix: get_ipython().run_line_magic('time', 'clf.fit(X_train, y_train)')
    # evaluated the code string in the IPython user namespace, where the
    # function's local clf/X_train are not defined. Time the call directly.
    start = time.perf_counter()
    clf.fit(X_train, y_train)
    print("Wall time: {:.3f} s".format(time.perf_counter() - start))
    print(clf)
    print("Predicting test labels...")
    y_pred = clf.predict(X_test)
    return y_pred
|
eda266df2cea704d557e4de4f7600b62c533f362
| 3,641,165
|
def get_executable_choices(versions):
    """
    Return available Maya releases, excluding default placeholder entries.
    """
    return [version for version in versions
            if not version.startswith(Config.DEFAULTS)]
|
df0c253b8ca29b42f7863a8944d21678847b7c9a
| 3,641,167
|
def list_songs():
    """
    Lists all the songs in your media server.

    Can do this without a login (the login check is intentionally omitted).
    """
    page['title'] = 'List Songs'
    # Get a list of all songs from the database
    allsongs = database.get_allsongs()
    # Data integrity check: the template expects an iterable.
    # (Fixed `== None` -> `is None`; removed redundant pre-assignment.)
    if allsongs is None:
        allsongs = []
    return render_template('listitems/listsongs.html',
                           session=session,
                           page=page,
                           user=user_details,
                           allsongs=allsongs)
|
bbeb4e11404dcce582ed86cc711508645399a7cd
| 3,641,168
|
import statistics
def linear_regression(xs, ys):
    """
    Compute simple linear regression coefficients.

    https://en.wikipedia.org/wiki/Simple_linear_regression

    Returns the (a, b) coefficients of the function f(x) = a * x + b
    fitted to the points (xs, ys) by least squares.
    (Docstring fix: previously said "f(y) = a * x + b".)

    Raises:
        ZeroDivisionError: if all paired x values are identical.
    """
    x_mean = statistics.mean(xs)
    y_mean = statistics.mean(ys)
    # Covariance / variance form of the least-squares slope; both sums run
    # over zip(xs, ys) so extra unpaired values are ignored, as before.
    num = sum((x - x_mean) * (y - y_mean) for x, y in zip(xs, ys))
    den = sum((x - x_mean) ** 2 for x, _ in zip(xs, ys))
    a = num / den
    b = y_mean - a * x_mean
    return a, b
|
6b6ecbd31262e5fe61f9cf7793d741a874327598
| 3,641,169
|
def calcR1(n_hat):
    """
    Calculate the rotation matrix that rotates the orbit plane (given by
    its normal) into the x-y plane, via the Rodrigues rotation formula.

    Parameters
    ----------
    n_hat : `~numpy.ndarray` (3)
        Unit vector normal to plane of orbit.

    Returns
    -------
    R1 : `~numpy.ndarray` (3, 3)
        Rotation matrix.
    """
    normal = np.ascontiguousarray(n_hat)
    # Rotation axis: perpendicular to both the normal and the z axis.
    axis = np.cross(normal, Z_AXIS)
    # Cosine of the rotation angle, equivalent to the cosine of the
    # inclination.
    cos_angle = np.dot(normal, Z_AXIS)
    # Skew-symmetric cross-product matrix of the rotation axis.
    skew = np.array([[0, -axis[2], axis[1]],
                     [axis[2], 0, -axis[0]],
                     [-axis[1], axis[0], 0]])
    # Rodrigues: R = I + K + K^2 / (1 + cos).
    # NOTE(review): singular when cos_angle == -1 (normal anti-parallel
    # to z) — confirm callers never hit that case.
    return np.identity(3) + skew + (skew @ skew) * (1 / (1 + cos_angle))
|
c3202c477eefe43648e693316079f9e2c6ac0aa0
| 3,641,170
|
def get_arb_info(info, n=1000):
    """
    Fetch up to ``n`` arbitrage rows for one exchange/trading pair.

    Example: info := {'start':1556668800, 'period':300, 'trading_pair':'eth_btc', 'exchange_id':'binance'}

    The query averages the close price across exchanges per timestamp and
    reports each row's deviation from that average as an absolute
    difference (arb_diff) and a relative signal (arb_signal).

    :param info: dict with keys 'exchange_id', 'trading_pair', 'period', 'start'
    :param n: maximum number of rows to return
    :return: fixed DataFrame of results, or None when the query fails/returns nothing
    """
    assert {'exchange_id', 'trading_pair', 'period', 'start'}.issubset(info.keys())
    # Bug fix: previously `info['n'] = n` mutated the caller's dict; bind
    # the query parameters on a copy instead.
    params = dict(info, n=n)
    q = """with sub as (
    select * from candlesticks
    where trading_pair=%(trading_pair)s and period=%(period)s and timestamp>=%(start)s
    ),
    thing as (
    select "timestamp", avg(close) from sub
    group by (timestamp)
    )
    select exchange,trading_pair, thing.timestamp, "period", "avg", "close"-"avg" as arb_diff, ("close"-"avg")/"avg" as arb_signal from
    (sub inner join thing on sub.timestamp = thing.timestamp)
    where exchange=%(exchange_id)s
    order by thing.timestamp
    limit %(n)s;
    """
    results = safe_qall(q, params)
    if results is not None:
        # arb_signal is more interpretable than arb_diff but the signal is the same
        df = pd.DataFrame(results, columns=["exchange", "trading_pair", "timestamp", "period", "avg", "arb_diff", "arb_signal"])
        return d.fix_df(df)
|
20177d71b0a816031aded0151e91711f729fe0bf
| 3,641,171
|
from typing import List
def parse_nested_root(stream: TokenStream) -> AstRoot:
    """Parse nested root.

    Expects a ":" introducing an indented block of commands; raises
    InvalidSyntax (located at the colon) when the block is empty.
    Returns an AstRoot spanning all parsed commands.
    """
    with stream.syntax(colon=r":"):
        colon = stream.expect("colon")
        if not consume_line_continuation(stream):
            exc = InvalidSyntax("Expected non-empty block.")
            raise set_location(exc, colon)
    # Outer block indentation and the nested commands' indentation level.
    level, command_level = stream.indentation[-2:]
    commands: List[AstCommand] = []
    with stream.intercept("newline"), stream.provide(
        scope=(),
        line_indentation=command_level,
    ):
        while True:
            commands.append(delegate("command", stream))
            # The command parser consumes the trailing newline so we need to rewind
            # to be able to use "consume_line_continuation()".
            while (token := stream.peek()) and not token.match("newline", "eof"):
                stream.index -= 1
            with stream.provide(multiline=True, line_indentation=level):
                if not consume_line_continuation(stream):
                    break
    node = AstRoot(commands=AstChildren(commands))
    return set_location(node, commands[0], commands[-1])
|
ea559c4c2592b4ecabfb10df39059391799f4462
| 3,641,172
|
def wg_config_write():
    """Write the script configuration file to disk.

    Returns the weechat.config_write() return code.
    """
    # `global` is only needed for assignment; wg_config_file is read here,
    # so the (removed) global declaration was unnecessary.
    return weechat.config_write(wg_config_file)
|
add032c3447d2c68005bbc1cc33006ba452afaa3
| 3,641,173
|
def is_palindrome_permutation(phrase):
    """Check whether a string is a permutation of a palindrome."""
    counts = [0] * (ord("z") - ord("a") + 1)
    odd_count = 0
    for ch in phrase:
        idx = char_number(ch)
        if idx == -1:
            continue  # not a letter we track
        counts[idx] += 1
        # Track how many letters currently have an odd count.
        odd_count += 1 if counts[idx] % 2 else -1
    # A palindrome permutation allows at most one letter with an odd count.
    return odd_count <= 1
|
dac9d0fc67f628cb22213d5fcad947baa3d2d8f1
| 3,641,174
|
import json
def get_stored_username():
    """Return the username stored in numbers.json, or None when absent."""
    filename = 'numbers.json'
    try:
        with open(filename) as file_object:
            return json.load(file_object)
    except FileNotFoundError:
        return None
|
3471369cfc147dd9751cedb7cde92ceb5d69e908
| 3,641,175
|
import posixpath
def post_process_symbolizer_image_file(file_href, dirs):
    """ Given an image file href and a set of directories, modify the image file
        name so it's correct with respect to the output and cache directories.

        Remote (http/https) images are cached locally; non-PNG/TIFF inputs
        are re-saved as PNG.

        Returns a tuple (dest_file, extension_without_dot, width, height).
    """
    # support latest mapnik features of auto-detection
    # of image sizes and jpeg reading support...
    # http://trac.mapnik.org/ticket/508
    mapnik_auto_image_support = (MAPNIK_VERSION >= 701)
    mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
    # Resolve the href relative to the stylesheet source directory.
    file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
    scheme, n, path, p, q, f = urlparse(file_href)
    if scheme in ('http','https'):
        # Remote resource: download into the cache and continue with the
        # local copy.
        scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)
    if scheme not in ('file', '') or not systempath.exists(un_posix(path)):
        raise Exception("Image file needs to be a working, fetchable resource, not %s" % file_href)
    if not mapnik_auto_image_support and not Image:
        raise SystemExit('PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0')
    img = Image.open(un_posix(path))
    if mapnik_requires_absolute_paths:
        path = posixpath.realpath(path)
    else:
        path = dirs.output_path(path)
    msg('reading symbol: %s' % path)
    image_name, ext = posixpath.splitext(path)
    # Mapnik can read PNG/TIFF natively; everything else is converted.
    if ext in ('.png', '.tif', '.tiff'):
        output_ext = ext
    else:
        output_ext = '.png'
    # new local file name
    dest_file = un_posix('%s%s' % (image_name, output_ext))
    if not posixpath.exists(dest_file):
        img.save(dest_file,'PNG')
    msg('Destination file: %s' % dest_file)
    return dest_file, output_ext[1:], img.size[0], img.size[1]
|
42026823be510b5ffbb7404bd24809a1232e8206
| 3,641,176
|
import torch
def collate_fn_feat_padded(batch):
    """
    Collate (feature, label) pairs into a padded batch, sorted by frame
    count in descending order.

    batch : list of tuples (feature, label), len(batch) == batch_size
        - feature : torch tensor of shape [channels, dim, n_frames];
          n_frames varies per utterance
        - label   : torch tensor of shape (1)

    Shorter utterances are padded by cyclically repeating their own frames
    until they reach the longest utterance's frame count.

    Returns (padded_features, labels) where padded_features has shape
    [batch, channels, dim, max_frames] and labels has shape [batch].
    """
    # Sort in place by frame count, longest first (mutates the caller's
    # list, matching the previous behaviour).
    batch.sort(key=lambda pair: pair[0].shape[2], reverse=True)
    feats, labels = zip(*batch)
    labels = torch.stack(labels, 0).view(-1)
    frame_counts = [feat.shape[2] for feat in feats]  # descending order
    max_frames = frame_counts[0]
    template = feats[0]
    padded = torch.zeros(len(feats), template.shape[0], template.shape[1],
                         template.shape[2]).float()
    for i, feat in enumerate(feats):
        original_len = frame_counts[i]
        # Wrap the utterance around itself until it is long enough,
        # always appending its first `original_len` frames.
        while feat.shape[2] < max_frames:
            feat = torch.cat((feat, feat[:, :, :original_len]), 2)
        padded[i] = feat[:, :, :max_frames]
    return padded, labels
|
b901b6c3eacfd4d0bac93e1569d59ad944365fd2
| 3,641,177
|
import unicodedata
import re
def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py

    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    text = str(value)
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        # Decompose accents, then drop anything outside ASCII.
        decomposed = unicodedata.normalize('NFKD', text)
        text = decomposed.encode('ascii', 'ignore').decode('ascii')
    text = re.sub(r'[^\w\s-]', '', text.lower())
    text = re.sub(r'[-\s]+', '-', text)
    return text.strip('-_')
|
39f7ed291e6a2cec5111ba979c35a6aaa8e521c0
| 3,641,178
|
def average_gradient_norm(model, data):
    """ Computes the average gradient norm for a keras model.

    Iterates one epoch of `data` (must expose `steps_per_epoch` and yield
    (X, y) batches), evaluates the gradients of the total loss w.r.t. the
    trainable weights for each batch, and averages the global L2 norm of
    those gradients over the batches.
    """
    # just checking if the model was already compiled
    if not hasattr(model, "train_function"):
        raise RuntimeError("You must compile your model before using it.")
    weights = model.trainable_weights  # weight tensors
    get_gradients = model.optimizer.get_gradients(model.total_loss, weights)  # gradient tensors
    input_tensors = [
        # input data
        model.inputs[0],
        # how much to weight each sample by
        model.sample_weights[0],
        # labels
        model.targets[0],
        # train or test mode
        keras.backend.learning_phase()
    ]
    grad_fct = keras.backend.function(inputs=input_tensors, outputs=get_gradients)
    steps = 0
    total_norm = 0
    s_w = None
    while steps < data.steps_per_epoch:
        X, y = next(data)
        # set sample weights to one
        # for every input
        # NOTE(review): s_w is sized from the FIRST batch only; if batch
        # sizes vary (e.g. a smaller last batch) this shape will be stale —
        # confirm against the data generator.
        if s_w is None:
            s_w = np.ones(X.shape[0])
        # Learning phase 0 = test mode (no dropout etc.).
        gradients = grad_fct([X, s_w, y, 0])
        # Global L2 norm across all gradient tensors of this batch.
        total_norm += np.sqrt(np.sum([np.sum(np.square(g)) for g in gradients]))
        steps += 1
    return total_norm / float(steps)
|
84f5feb894f72856ef43eea8befc048f216020bd
| 3,641,179
|
import logging
def string_regex_matcher(input_str: str, regex: str, replacement_str=""):
    """Python version of StringRegexMatcher in mlgtools.

    Replaces every substring of `input_str` matched by `regex` with
    `replacement_str`.

    Args:
        input_str (str): input string to match
        regex (str): regular expression to match
        replacement_str (str): replacement string for matched substrings

    Returns:
        str: the substituted string, or the original string when `regex`
        is None or empty.
    """
    if not regex:
        # Nothing to match against: log and return the input untouched.
        log(logging.INFO, DataCategory.PUBLIC,
            '_string_regex_matcher: regex is None or empty. Returning original sentence.')
        return input_str
    compiled = check_and_compile_regular_expression(regex)
    return compiled.sub(replacement_str, input_str)
|
c813447a1453347a1b263f603970d8ff4f709696
| 3,641,180
|
def cont_scatterplot(data: pd.DataFrame,
                     x: str,
                     y: str,
                     z: str or None,
                     label: str,
                     cmap: str,
                     size: int or str or None,
                     fig: plt.Figure,
                     cbar_kwargs: dict,
                     **kwargs):
    """
    Scatterplot coloured by a continuous label, in 2D or (when `z` is
    given) 3D, with a colorbar attached to the figure.

    Parameters
    ----------
    data: Pandas.DataFrame
    x: str
    y: str
    z: str, optional
    label: str
        Column providing the continuous colour values.
    cmap: str
    size: int or str, optional
        Marker size, or the name of a column holding per-point sizes.
    fig: Matplotlib.Figure
    cbar_kwargs: dict
        Keyword arguments passed to colorbar
    kwargs:
        Additional keyword arguments passed to Matplotlib.Axes.scatter call

    Returns
    -------
    Matplotlib.Axes
    """
    if isinstance(size, str):
        size = data[size].values
    colours = data[label].values
    if z is None:
        ax = fig.add_subplot(111)
        coords = (data[x].values, data[y].values)
    else:
        ax = fig.add_subplot(111, projection="3d")
        coords = (data[x].values, data[y].values, data[z].values)
    im = ax.scatter(*coords, c=colours, s=size, cmap=cmap, **kwargs)
    fig.colorbar(im, ax=ax, **cbar_kwargs)
    return ax
|
e6c85d1fafa93c9000c807c7ec82ac7851082aec
| 3,641,181
|
def performance(problem, W, H, C, R_full):
    """Compute the performance of the IMC estimates.

    Evaluates, for each iterate of the factor matrices W and H, the train
    loss, the combined regularizer (lasso/group/ridge weighted by C),
    sparsity of W and H, iterate-to-iterate L2 variation, and the loss and
    score against the full matrix R_full.

    Args:
        problem: IMCProblem instance providing value/loss/prediction/score.
        W, H: factor matrices, optionally with a trailing iteration axis.
        C: triple (C_lasso, C_group, C_ridge) of regularization weights.
        R_full: full target matrix used for the (expensive) full-matrix
            loss and score.

    Returns:
        Tuple (metrics, titles, units) where metrics stacks one row per
        metric across iterations.
    """
    assert isinstance(problem, IMCProblem), \
        """`problem` must be an IMC problem."""
    assert W.ndim == H.ndim, """Mismatching dimensionality."""
    if W.ndim < 3:
        W, H = np.atleast_3d(W, H)
    n_iterations = W.shape[-1]
    assert W.shape[-1] == H.shape[-1], """Mismatching number of iterations."""
    # sparsitry coefficients
    sparsity_W = np.isclose(W, 0).mean(axis=(0, 1))
    sparsity_H = np.isclose(H, 0).mean(axis=(0, 1))
    # Regularization -- components
    reg_ridge = (0.5 * np.linalg.norm(W, "fro", axis=(0, 1))**2 +
                 0.5 * np.linalg.norm(H, "fro", axis=(0, 1))**2)
    reg_group = (np.linalg.norm(W, 2, axis=1).sum(axis=0) +
                 np.linalg.norm(H, 2, axis=1).sum(axis=0))
    reg_lasso = (np.linalg.norm(W.reshape(-1, W.shape[-1]), 1, axis=0) +
                 np.linalg.norm(H.reshape(-1, H.shape[-1]), 1, axis=0))
    # Regularization -- full
    C_lasso, C_group, C_ridge = C
    regularizer_value = (C_group * reg_group +
                         C_lasso * reg_lasso +
                         C_ridge * reg_ridge)
    # sequential forbenius norm of the matrices
    # (a trailing 0 keeps the arrays aligned with n_iterations)
    div_W = np.r_[np.linalg.norm(np.diff(W, axis=-1),
                                 "fro", axis=(0, 1)), 0]
    div_H = np.r_[np.linalg.norm(np.diff(H, axis=-1),
                                 "fro", axis=(0, 1)), 0]
    # Objective value on the train data
    v_val_train = np.array([problem.value(W[..., i], H[..., i])
                            for i in range(n_iterations)])
    # Objective on the full matrix (expensive!)
    v_val_full = np.array([
        problem.loss(problem.prediction(W[..., i], H[..., i]).ravel(),
                     R_full.ravel()).sum() for i in range(n_iterations)])
    # Score on the full matrix (expensive!)
    score_full = np.array([
        problem.score(problem.prediction(W[..., i], H[..., i]).ravel(),
                      R_full.ravel()) for i in range(n_iterations)])
    metrics = np.stack([v_val_train, regularizer_value,
                        score_full, v_val_full,
                        sparsity_W, sparsity_H,
                        div_W, div_H], axis=0)
    titles = ['Observed Elements', 'Regularization', 'Score',
              'Full Matrix', 'Zero Values of W', 'Zero Values of H',
              'L2-Norm Variation W', 'L2-Norm Variation H']
    units = ['L2-Loss', 'L2-Loss', 'Score', 'L2-Loss',
             '%', '%', 'L2-Norm', 'L2-Norm']
    return metrics, titles, units
|
79635826cc72633a0024df29980ecc678f20e32d
| 3,641,182
|
import pytz
from datetime import datetime
def courseschedules_to_private_ical_feed(user):
    """
    Generate an ICAL feed for all course schedules associated with the given user.

    The IDs given for each event are sequential, unique only amongst the results of this particular query, and not
    guaranteed to be consistent across calls.

    :param user: The user to generate an ICAL feed for.
    :return: An ICAL string of all the user's course schedules.
    """
    # Bug fix: with `from datetime import datetime` in scope, the previous
    # `datetime.timedelta(days=1)` raised AttributeError (the class has no
    # `timedelta`); import the timedelta class directly.
    from datetime import timedelta

    calendar = _create_calendar(user)
    events = []
    for course in Course.objects.for_user(user.pk).iterator():
        events += coursescheduleservice.course_schedules_to_events(course, course.schedules)
    # Render times in the user's configured time zone.
    timezone.activate(pytz.timezone(user.settings.time_zone))
    for event in events:
        calendar_event = icalendar.Event()
        calendar_event["UID"] = f"he-{user.pk}-{event.pk}"
        calendar_event["SUMMARY"] = event.title
        calendar_event["DTSTAMP"] = icalendar.vDatetime(timezone.localtime(event.created_at))
        if not event.all_day:
            calendar_event["DTSTART"] = icalendar.vDatetime(timezone.localtime(event.start))
            calendar_event["DTEND"] = icalendar.vDatetime(timezone.localtime(event.end))
        else:
            # All-day ICAL events use an exclusive DTEND, hence the +1 day.
            calendar_event["DTSTART"] = icalendar.vDate(event.start)
            calendar_event["DTEND"] = icalendar.vDate(event.end + timedelta(days=1))
        calendar_event["DESCRIPTION"] = _create_event_description(event)
        calendar.add_component(calendar_event)
    timezone.deactivate()
    return calendar.to_ical()
|
1d1ae7f650416eb240d0ce5240523d8c66067389
| 3,641,183
|
def AuxStream_Cast(*args):
    """
    Cast(BaseObject o) -> AuxStream
    AuxStream_Cast(Seiscomp::Core::BaseObjectPtr o) -> AuxStream

    SWIG-generated wrapper: downcasts a generic BaseObject pointer to an
    AuxStream by delegating to the native _DataModel extension module.
    """
    return _DataModel.AuxStream_Cast(*args)
|
a38248e5476273aa6b18da5017a7ca0a033fd0a8
| 3,641,184
|
def BPNet(tasks, bpnet_params):
    """
    BPNet architecture definition
    Args:
        tasks (dict): dictionary of tasks info specifying
            'signal', 'loci', and 'bias' for each task
        bpnet_params (dict): parameters to the BPNet architecture
            The keys include (all are optional)-
            'input_len': (int)
            'output_profile_len': (int),
            'motif_module_params': (dict) -
                'filters' (list)
                'kernel_sizes' (list)
                'padding' (str)
            'syntax_module_params': (dict) -
                'num_dilation_layers' (int)
                'filters' (int)
                'kernel_size' (int)
                'padding': (str)
                'pre_activation_residual_unit' (boolean)
            'profile_head_params': (dict) -
                'filters' (int)
                'kernel_size' (int)
                'padding' (str)
            'counts_head_params': (dict) -
                'units' (int)
            'profile_bias_module_params': (dict) -
                'kernel_sizes' (list)
            'counts_bias_module_params': (dict) - N/A
            'use_attribution_prior': (boolean)
            'attribution_prior_params': (dict) -
                'frequency_limit' (int)
                'limit_softness' (float)
                'grad_smooth_sigma' (int)
                'profile_grad_loss_weight' (float)
                'counts_grad_loss_weight' (float)
            'loss_weights': (list)
    Returns:
        tensorflow.keras.layers.Model
    """
    # load params from json file
    (input_len,
     output_profile_len,
     motif_module_params,
     syntax_module_params,
     profile_head_params,
     counts_head_params,
     profile_bias_module_params,
     counts_bias_module_params,
     use_attribution_prior,
     attribution_prior_params) = load_params(bpnet_params)
    # Step 1 - sequence input
    one_hot_input = layers.Input(shape=(input_len, 4), name='sequence')
    # Step 2 - Motif module (one or more conv layers)
    motif_module_out = motif_module(
        one_hot_input, motif_module_params['filters'],
        motif_module_params['kernel_sizes'], motif_module_params['padding'])
    # Step 3 - Syntax module (all dilation layers)
    syntax_module_out = syntax_module(
        motif_module_out, syntax_module_params['num_dilation_layers'],
        syntax_module_params['filters'], syntax_module_params['kernel_size'],
        syntax_module_params['padding'],
        syntax_module_params['pre_activation_residual_unit'])
    # Step 4.1 - Profile head (large conv kernel)
    # Step 4.1.1 - get total number of output tracks across all tasks
    # NOTE(review): the loops below index `tasks[i]` with integers, so the
    # dict is assumed to be keyed by contiguous ints 0..num_tasks-1 — confirm
    # against the tasks-info loader.
    num_tasks = len(list(tasks.keys()))
    total_tracks = 0
    for i in range(num_tasks):
        total_tracks += len(tasks[i]['signal']['source'])
    # Step 4.1.2 - conv layer to get pre bias profile prediction
    profile_head_out = profile_head(
        syntax_module_out, total_tracks,
        profile_head_params['kernel_size'], profile_head_params['padding'])
    # first let's figure out if bias input is required based on
    # tasks info, this also affects the naming of the profile head
    # and counts head layers
    # total number of bias tasks in the tasks_info dictionary
    total_bias_tracks = 0
    # number of bias tracks in each task
    task_bias_tracks = {}
    for i in range(num_tasks):
        task_bias_tracks[i] = _get_num_bias_tracks_for_task(tasks[i])
        total_bias_tracks += task_bias_tracks[i]
    # Step 4.1.3 crop profile head to match output_len
    # When no bias is present the cropped profile head IS the final output,
    # so it must carry the output layer name the batch generator expects.
    if total_bias_tracks == 0:
        profile_head_name = 'profile_predictions'
    else:
        profile_head_name = 'profile_head_cropped'
    # symmetric center crop down to output_profile_len
    crop_size = int_shape(profile_head_out)[1] // 2 - output_profile_len // 2
    profile_head_out = layers.Cropping1D(
        crop_size, name=profile_head_name)(profile_head_out)
    # Step 4.2 - Counts head (global average pooling)
    if total_bias_tracks == 0:
        counts_head_name = 'logcounts_predictions'
    else:
        counts_head_name = 'counts_head'
    counts_head_out = counts_head(
        syntax_module_out, counts_head_name, total_tracks)
    # Step 5 - Bias Input
    # if the tasks have no bias tracks then profile_head and
    # counts_head are the outputs of the model
    inputs = [one_hot_input]
    if total_bias_tracks == 0:
        # we need to first rename the layers to correspond to what
        # the batch generator sends
        # At this point, since there is no bias the two outputs
        # are called 'profile_head_cropped' & 'counts_head'
        # HACK(review): mutating the private `_name` attribute of a Keras
        # tensor is fragile and may be a no-op depending on the TF version;
        # the Cropping1D/counts_head layers above were already named, so
        # this looks redundant — confirm.  The print is a leftover debug.
        print("renaming layers")
        profile_head_out._name = 'profile_predictions'
        counts_head_out._name = 'logcounts_predictions'
        profile_outputs = profile_head_out
        logcounts_outputs = counts_head_out
    else:
        if num_tasks != len(profile_bias_module_params['kernel_sizes']):
            raise NoTracebackException(
                "Length on 'kernel_sizes' in profile_bias_module_params "
                "must match #tasks")
        # Step 5.1 - Define the bias input layers
        profile_bias_inputs = []
        counts_bias_inputs = []
        for i in range(num_tasks):
            if task_bias_tracks[i] > 0:
                # profile bias input for task i
                profile_bias_inputs.append(layers.Input(
                    shape=(output_profile_len, task_bias_tracks[i]),
                    name="profile_bias_input_{}".format(i)))
                # counts bias input for task i
                counts_bias_inputs.append(layers.Input(
                    shape=(task_bias_tracks[i]),
                    name="counts_bias_input_{}".format(i)))
                # append to inputs
                inputs.append(profile_bias_inputs[i])
                inputs.append(counts_bias_inputs[i])
            else:
                # keep list positions aligned with task indices
                profile_bias_inputs.append(None)
                counts_bias_inputs.append(None)
        # Step 5.2 - account for profile bias
        profile_outputs = profile_bias_module(
            profile_head_out, profile_bias_inputs, tasks,
            kernel_sizes=profile_bias_module_params['kernel_sizes'])
        # Step 5.3 - account for counts bias
        logcounts_outputs = counts_bias_module(
            counts_head_out, counts_bias_inputs, tasks)
    if use_attribution_prior:
        # instantiate attribution prior Model with inputs and outputs
        return AttributionPriorModel(
            attribution_prior_params['frequency_limit'],
            attribution_prior_params['limit_softness'],
            attribution_prior_params['grad_smooth_sigma'],
            attribution_prior_params['profile_grad_loss_weight'],
            attribution_prior_params['counts_grad_loss_weight'],
            inputs=inputs,
            outputs=[profile_outputs, logcounts_outputs])
    else:
        # instantiate keras Model with inputs and outputs
        return Model(
            inputs=inputs, outputs=[profile_outputs, logcounts_outputs])
|
cfffac8da543241160b6fc1963b8b19df7fa2b02
| 3,641,185
|
def getCubePixels(cubeImages):
    """
    Return the raw pixel data copied out of each `bpy.types.Image` in
    `cubeImages`, as a list (one entry per face image).  Factoring this
    functionality out into its own function is useful for performance
    profiling.
    """
    pixels = []
    for face in cubeImages:
        # Slice-copy detaches the data from Blender's image buffer.
        pixels.append(face.pixels[:])
    return pixels
|
cdb2ba02ce9466e1b92a683dbea409e66b60c8da
| 3,641,186
|
import select
async def get_round_details(round_id):
    """
    Get details for a given round (include snapshot).

    :param round_id: primary key of the round to fetch
    :return: dict of the matched row's columns, or a falsy value when no
        row matches
    """
    # NOTE(review): `select`, `detail_columns`, `select_from_default`,
    # `rounds_table` and `conn` are module-level names not visible here.
    # The stdlib `import select` above would make `select(...)` a TypeError;
    # this presumably relies on SQLAlchemy's `select` — verify the imports.
    query = (
        select(detail_columns)
        .select_from(select_from_default)
        .where(rounds_table.c.id == round_id)
    ) # noqa: E127
    result = await conn.fetch_one(query=query)
    # `result` is falsy when nothing matched; otherwise expose it as a dict.
    return result and dict(result)
|
b7d64f52a8dff1a68a327651653f1f217f688146
| 3,641,187
|
import random
def dens_hist_plot(**kwargs):
    """
    Render a 2D density contour of top-1 vs top-2 prediction probabilities,
    with marginal histograms along the top and right edges.

    Keyword Args:
        df: classification prediction probabilities; must expose the
            columns/keys 'top1prob' and 'top2prob' (e.g. a pandas DataFrame).
            Defaults to 5 random sample values per column for demo purposes.

    Side effects:
        Displays the figure inline via plotly's ``iplot``.
    """
    # Demo default: two columns of 5 distinct random ints in [1, 100).
    data = {'top1prob' : random.sample(range(1, 100), 5),
            'top2prob' : random.sample(range(1, 100), 5)
           }
    def_vals = {"df" : data
               } # default parameters value
    # fill in any keyword the caller did not supply
    for k, v in def_vals.items():
        kwargs.setdefault(k, v)
    df = kwargs['df']
    x = df['top1prob']
    y = df['top2prob']
    # NOTE(review): `text_source` is not defined in this function; it must be
    # a module-level name or this default evaluation raises NameError — confirm.
    def make_anno(x=1, y=1, text=text_source):
        return go.Annotation(
            text=text,  # annotation text
            showarrow=False,  # remove arrow
            xref='paper',  # use paper coords
            yref='paper',  # for both coordinates
            xanchor='right',  # x-coord line up with right end of text
            yanchor='bottom',  # y-coord line up with bottom end of text
            x=x,  # position's x-coord
            y=y   # and y-coord
        )
    title = 'Prediction Result<br>\
    Top1, Top2' # plot's title
    x_title = 'Top1 Probability'#.format(site1) # x and y axis titles
    y_title = 'Top2 Probability'
    # Make a layout object
    # NOTE(review): go.XAxis / go.Annotations are legacy plotly graph_objs
    # aliases; newer plotly versions removed them — verify pinned version.
    layout1 = go.Layout(
        title=title,  # set plot's title
        font=dict(
            family='PT Sans Narrow',  # global font
            size=13
        ),
        xaxis1=go.XAxis(
            title=x_title,  # set x-axis title
            #range=xy_range, # x-axis range
            zeroline=False  # remove x=0 line
        ),
        annotations=go.Annotations([  # add annotation citing the data source
            make_anno()
        ]),
        showlegend=True,  # show legend
        autosize=False,  # custom size
        width=980,  # set figure width
        height=880,  # and height
        margin=dict(l=100,
                    r=50,
                    b=100,
                    t=50
                    )
    )
    # scatter of the raw probability points
    trace1 = go.Scatter(
        x=x, y=y, mode='markers', name='points',
        marker=dict(color='rgb(102,0,0)', size=2, opacity=0.4)
    )
    # 2D density contour over the same points
    trace2 = go.Histogram2dContour(
        x=x, y=y, name='density', ncontours=20,
        colorscale='Hot', reversescale=True, showscale=False
    )
    # marginal histogram for x (drawn on the secondary y-axis strip)
    trace3 = go.Histogram(
        x=x, name='x density',
        marker=dict(color='rgb(102,0,0)'),
        yaxis='y2'
    )
    # marginal histogram for y (drawn on the secondary x-axis strip)
    trace4 = go.Histogram(
        y=y, name='y density', marker=dict(color='rgb(102,100,200)'),
        xaxis='x2'
    )
    # NOTE: rebinds `data` (previously the demo dict) to the trace list.
    data = [trace1, trace2, trace3, trace4]
    # layout with the main axes taking 85% and the marginals the remaining 15%
    layout = go.Layout(
        showlegend=False,
        autosize=False,
        xaxis=dict(
            domain=[0, 0.85],
            showgrid=False,
            zeroline=False, title = x_title
        ),
        yaxis=dict(
            domain=[0, 0.85],
            showgrid=False,
            zeroline=False, title = y_title
        ),
        margin=dict(
            t=50
        ),
        hovermode='closest',
        bargap=0,
        xaxis2=dict(
            domain=[0.85, 1],
            showgrid=False,
            zeroline=False
        ),
        yaxis2=dict(
            domain=[0.85, 1],
            showgrid=False,
            zeroline=False
        )
    )
    fig = go.Figure(data=data, layout=layout)
    # layer layout1 (title/fonts/size) on top of the domain layout
    fig.update(layout = layout1)
    # watermark/logo image anchored at the bottom-left of the plot area
    fig['layout'].update(images= [dict(
                      source= "image/0016_Blue_horizon.svg",
                      xref= "paper",
                      yref= "paper", xanchor="left", yanchor="bottom",
                      x= 0,
                      y= 0,
                      sizex= 0.1,
                      sizey= 0.1,
                      sizing= "stretch",
                      opacity= 0.5,
                      layer= "above")])
    iplot(fig, show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo' : False})
    #plot(fig, filename='network_predic.html', show_link=False, config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'showLink': False, 'displaylogo' : False})
|
9b9e65f4aef04c2676b48737f4b9bf525172b6ea
| 3,641,188
|
import time
def run_query(run_id, athena_client, query, athena_database_name, wait_to_finish):
    """ Run the given Athena query
    Arguments:
        run_id {string} -- run_id for the current Step Function execution
        athena_client {boto3.client} -- Boto3 Athena client
        query {string} -- Athena query to execute
        athena_database_name {string} -- Athena database to use for query execution
        wait_to_finish {boolean} -- Should method wait for the Athena query to finish?
    Raises:
        utility.S3InsightsException: when Athena query fails
    Returns:
        string -- Athena execution id
    """
    # Results always land in the consolidated inventory bucket under a
    # run-specific prefix.
    result_configuration = {
        'OutputLocation': 's3://{0}/{1}'.format(
            config.DeploymentDetails.consolidated_inventory_bucket_name,
            get_s3_output_location_prefix(run_id)),
    }
    start_kwargs = {
        'QueryString': query,
        'ResultConfiguration': result_configuration,
    }
    # Only scope the query to a database when one was given.
    if athena_database_name is not None:
        start_kwargs['QueryExecutionContext'] = {'Database': athena_database_name}
    response = athena_client.start_query_execution(**start_kwargs)
    execution_id = response['QueryExecutionId']
    if not wait_to_finish:
        return execution_id
    # Best-effort poll: up to 9 checks, 30 seconds apart.
    for _ in range(1, 10):
        status_response = athena_client.get_query_execution(
            QueryExecutionId=execution_id)
        state = status_response['QueryExecution']['Status']['State']
        if utility.compare_strings(state, 'succeeded'):
            break
        if utility.compare_strings(state, 'failed'):
            raise utility.S3InsightsException('Athena query failed for unknown reasons')
        time.sleep(30)
    return execution_id
|
0ee4618de6df9e32bbe3255f7a423dd838a564f1
| 3,641,189
|
def next_version(v: str) -> str:
    """
    If ``v`` is a prerelease version, returns the base version.  Otherwise,
    returns the next minor version after the base version.

    :param v: a PEP 440 version string
    :return: the computed version string (epoch preserved)
    :raises packaging.version.InvalidVersion: if ``v`` is not PEP 440 compliant
    """
    vobj = Version(v)
    if vobj.is_prerelease:
        return str(vobj.base_version)
    vs = list(vobj.release)
    # Pad short releases (e.g. "1") so the minor component always exists;
    # previously this raised IndexError on single-component versions.
    while len(vs) < 2:
        vs.append(0)
    vs[1] += 1
    # Zero out patch and any finer-grained release components.
    vs[2:] = [0] * len(vs[2:])
    s = ".".join(map(str, vs))
    if vobj.epoch:
        s = f"{vobj.epoch}!{s}"
    return s
|
aea76df6874e368494ca630d594bee978ffc7e08
| 3,641,190
|
def temp_obs():
    """Return a list of tobs from the 2016-08-24 to 2017-08-23"""
    # Pull (date, tobs) pairs recorded after the stored start date.
    observations = (
        session.query(Measurement.date, Measurement.tobs)
        .filter(Measurement.date > year_start_date)
        .all()
    )
    # Shape each row as a {"Date": ..., "Temperature": ...} record.
    temp_data = [
        {"Date": obs_date, "Temperature": obs_temp}
        for obs_date, obs_temp in observations
    ]
    # Return data in json format
    return jsonify(temp_data)
|
6a74ad37cb95d1103943f15dfcac82e39c38620d
| 3,641,192
|
def pcmh_5_5c__3():
    """ER/IP discharge log (PCMH 5.5c item 3).

    Builds a signed download URL for the ER/IP log template, registers the
    multi-answer question prompting the practice to maintain the log, and
    returns the downloadable documents for this page.

    NOTE(review): relies on web2py-style globals (`URL`, `request`,
    `session`, `MY_KEY`) and the project `MultiQNA` helper — not visible here.
    """
    # Signed (hmac + per-session salt) URL to the Word template download.
    er_ip_log_url = URL('init', 'word', 'er_ip_log.doc',
                        vars=dict(**request.get_vars),
                        hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
    # er_ip_log tracking chart: exactly one required answer, unlimited updates.
    er_ip_log = MultiQNA(
        1, float('inf'), True,
        'er_ip_log',
        "Please fill out <a href='{url}'>this ER/IP log</a> with at least 4 months of past data. Continue to maintain "
        "this log permanently as part of your PCMH transformation. <b>Please make sure all the patients in this log "
        "have their discharge summary in their patient record!</b>"
            .format(url=er_ip_log_url)
    )
    er_ip_log.set_template("{choose_file}")
    # Documents exposed to the page (team-only download permission).
    return dict(documents=[
        dict(
            description="Emergency Room / Inpatient Tracking Log",
            url=er_ip_log_url,
            permissions=["IS_TEAM"]
        ),
    ])
|
968bcc181cf3ec4513cfeb73004cdd3336f62003
| 3,641,193
|
import json
def is_jsonable(data):
    """
    Check whether the given data can be serialized to JSON.
    Source: https://stackoverflow.com/a/53112659/8957978
    """
    try:
        json.dumps(data)
    except (TypeError, OverflowError):
        # json.dumps raises TypeError for unsupported types and
        # OverflowError for out-of-range values (e.g. huge ints with a
        # custom encoder); either way the data is not serializable.
        return False
    return True
|
acfc697025a8597fdd8043fe3245a339b27051c4
| 3,641,196
|
def check_ref_type(ref, allowed_types, ws_url):
    """
    Validates the object type of ref against the list of allowed types. If it passes, this
    returns True, otherwise False.
    Really, all this does is verify that at least one of the strings in allowed_types is
    a (case-insensitive) substring of the ref object type name.
    Ex1:
    ref = "KBaseGenomes.Genome-4.0"
    allowed_types = ["assembly", "KBaseFile.Assembly"]
    returns False
    Ex2:
    ref = "KBaseGenomes.Genome-4.0"
    allowed_types = ["assembly", "genome"]
    returns True
    """
    obj_type = get_object_type(ref, ws_url).lower()
    # Case-insensitive substring match against each allowed type.
    return any(allowed.lower() in obj_type for allowed in allowed_types)
|
263cf56124fb466544dfba8879a55d1f09160d35
| 3,641,197
|
from typing import Dict
from typing import List
def check_infractions(
    infractions: Dict[str, List[str]],
) -> int:
    """
    Check infractions.
    :param infractions: the infractions dict {commit sha, infraction explanation}
    :return: 0 if no infractions, non-zero otherwise
    """
    # Guard clause: nothing to report.
    if not infractions:
        logger.print('All good!')
        return 0
    logger.print('Missing sign-off(s):')
    logger.print()
    # One indented block per offending commit, listing its explanations.
    for sha, explanations in infractions.items():
        logger.print('\t' + sha)
        for explanation in explanations:
            logger.print('\t\t' + explanation)
    return 1
|
954f5f80dbfdd2a834f56662f4d04c1f788bb9ef
| 3,641,198
|
def default_role(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine):
    """Set the default interpreted text role.

    With no argument, restores the built-in default role by removing any
    override registered under the empty name.  With a role-name argument,
    looks the role up and installs it as the default; unknown names produce
    an error system message.  Returns a list of system-message nodes.
    """
    if not arguments:
        # Restore the "default" default role.  (`dict.has_key` was removed
        # in Python 3; use the `in` operator instead.)
        if '' in roles._roles:
            del roles._roles['']
        return []
    role_name = arguments[0]
    role, messages = roles.role(
        role_name, state_machine.language, lineno, state.reporter)
    if role is None:
        error = state.reporter.error(
            'Unknown interpreted text role "%s".' % role_name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return messages + [error]
    # The empty name is docutils' slot for the default role.
    roles._roles[''] = role
    # @@@ should this be local to the document, not the parser?
    return messages
|
f1f6ed82da74cd898d4df0753cd1044a6666d216
| 3,641,200
|
def input_fn(evaluate=False) -> tf.data.Dataset:
    """
    Build the batched (features, label) dataset pipeline.

    Args:
        evaluate (bool): if True, draw examples from ``evaluate_generator``;
            otherwise from ``train_generator`` (module-level generators).

    Returns:
        tf.data.Dataset yielding batches of
        ({'character': string tensor of shape (SEQUENCE_LENGHT,)}, string label).
        NOTE(review): ``SEQUENCE_LENGHT`` (sic) is a module-level constant
        defined elsewhere.
    """
    # Pick the generator for this mode and wrap it in a tf.data pipeline.
    g = ( evaluate_generator if evaluate else train_generator )
    ds = tf.data.Dataset.from_generator( generator=g,
        output_types=( { 'character' : tf.string } , tf.string ),
        output_shapes=( { 'character' : (SEQUENCE_LENGHT,) } , () )
    )
    # Fixed batch size of 64; prefetch one batch to overlap with training.
    ds = ds.batch(64)
    ds = ds.prefetch(1)
    return ds
|
2954eb4b4f657fff3e029096a562516842c615e8
| 3,641,202
|
async def validate_input(data):
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    Raises CannotConnect when no Harmony hub answers at the given host.
    """
    host = data[CONF_HOST]
    harmony = await get_harmony_client_if_available(host)
    if not harmony:
        raise CannotConnect
    # Connection established: derive the display name and unique id.
    return {
        CONF_NAME: find_best_name_for_remote(data, harmony),
        CONF_HOST: host,
        UNIQUE_ID: find_unique_id_for_remote(harmony),
    }
|
fc174ed28bd80fd8e118094ea21358b9c8f41fa3
| 3,641,204
|
import numpy
def window_tukey(M, alpha=0.5):
    """Return a Tukey window, also known as a tapered cosine window.

    `alpha` is the fraction of the window inside the cosine-tapered region:
    the function returns a boxcar (rectangular) window for `alpha=0` and a
    Hann window for `alpha=1`, consistent with the limits of the general
    formula below.  (The previous special cases were inverted and called the
    nonexistent ``numpy.hann``.)

    :param M: number of points in the window
    :param alpha: taper fraction in [0, 1]
    :return: 1-D numpy array of length M
    """
    if alpha <= 0:
        # No taper: rectangular window.
        return numpy.ones(M)
    elif alpha >= 1:
        # Fully tapered: the general formula reduces to a Hann window.
        return numpy.hanning(M)
    n = numpy.arange(0, M)
    width = int(numpy.floor(alpha * (M - 1) / 2.0))
    n1 = n[0:width + 1]
    n2 = n[width + 1:M - width - 1]
    n3 = n[M - width - 1:]
    # Rising cosine taper, flat middle, falling cosine taper.
    w1 = 0.5 * (1 + numpy.cos(numpy.pi * (-1 + 2.0 * n1 / alpha / (M - 1))))
    w2 = numpy.ones(n2.shape)
    w3 = 0.5 * (1 + numpy.cos(numpy.pi * (-2.0 / alpha + 1 + 2.0 * n3 / alpha / (M - 1))))
    return numpy.concatenate((w1, w2, w3))
|
f62d216de29e2271270a4fc3c03b0f93930f1275
| 3,641,205
|
def workshopsDF(symbol="", **kwargs):
    """This is a meeting or series of meetings at which a group of people engage in discussion and activity on a particular subject, product or service to gain hands-on experience.
    https://iexcloud.io/docs/api/#workshops

    Args:
        symbol (str): symbol to use
        **kwargs: passed through to the shared ``_baseDF`` request helper
            (e.g. token/version/format options)

    Returns:
        Presumably a pandas DataFrame (per the ``DF`` suffix) of the premium
        Wall Street Horizon "workshop" endpoint — confirm in ``_baseDF``.
    """
    # Delegate to the shared premium-data helper with this endpoint's ID.
    return _baseDF(id="PREMIUM_WALLSTREETHORIZON_WORKSHOP", symbol=symbol, **kwargs)
|
7f6417acecdd7a2bd7279e5d0367894458589241
| 3,641,206
|
def free_path(temp, diff, m_mol):
    """
    Calculates the free path for a molecule
    Based on free_path.m by Joni Kalliokoski 2014-08-13
    :param temp: temperature (K)
    :param diff: diffusion coefficient (m^2/s)
    :param m_mol: molar mass (kg/mol)
    :return: free path (m)
    """
    # sqrt(pi*M / (8*R*T)) is the reciprocal of the mean molecular speed.
    inverse_mean_speed = np.sqrt(np.pi * m_mol / (8 * gas_const * temp))
    return 3 * diff * inverse_mean_speed
|
053a2af667e1e9ecc85f1ea6ca898c80e14ec81f
| 3,641,207
|
def comp_periodicity_spatial(self):
    """Compute the (anti)-periodicities of the machine in space domain

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    pera : int
        Number of spatial periodicities of the machine over 2*pi
    is_apera : bool
        True if an anti-periodicity is possible after the periodicities
    """
    pole_pairs = self.get_pole_pair_number()
    # Stator and rotor spatial (anti-)periodicities
    per_stator, is_antiper_stator = self.stator.comp_periodicity_spatial()
    per_rotor, is_antiper_rotor = self.rotor.comp_periodicity_spatial()
    # Machine periodicity = greatest common divisor of both parts and pole pairs
    pera = int(gcd(gcd(per_stator, per_rotor), pole_pairs))
    # Anti-periodicity requires both parts to be anti-periodic
    is_apera = bool(is_antiper_stator and is_antiper_rotor)
    return pera, is_apera
|
b34a50f0df0bc1bfd3ffaddb5e8a57780e50a6b8
| 3,641,208
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.