content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def calculate_deltaangle_distance(
    org_ligs,
    smiles_keys,
    fg_factory,
    file_prefix=None
):
    """
    Compute the absolute bite-angle change of every ligand in the cage.

    This function will not work for cages built from FGs other than
    metals + AromaticCNC and metals + AromaticCNN.

    Parameters
    ----------
    org_ligs : :class:`dict` of :class:`stk.BuildingBlock`
        Maps each ligand file name to its stk building block.
    smiles_keys : :class:`dict` of :class:`int`
        Maps a linker SMILES string to its unique index.
    fg_factory :
        :class:`iterable` of :class:`stk.FunctionalGroupFactory`
        Functional groups to assign to molecules (metals + AromaticCNC
        or metals + AromaticCNN only).
    file_prefix : :class:`str`, optional
        Prefix of each optimised ligand structure file. The eventual
        file name is "file_prefix"{number of atoms}_{idx}_opt.mol,
        where `idx` identifies a molecule unique by SMILES.

    Returns
    -------
    delta_angles : :class:`dict`
        Absolute difference between the in-cage and free-optimised
        bite angle, keyed by ligand.
    """

    def _furthest_pair(bb):
        # Keep only the two functional groups that are furthest apart.
        return bb.with_functional_groups(
            functional_groups=get_furthest_pair_FGs(bb)
        )

    delta_angles = {}
    for lig_name, stk_lig in org_ligs.items():
        smiles_key = stk.Smiles().get_key(stk_lig)
        idx = smiles_keys[smiles_key]
        atom_count = stk_lig.get_num_atoms()
        # Name of the optimised free-ligand file, without any cage info.
        if file_prefix is None:
            opt_file = f'organic_linker_s{atom_count}_{idx}_opt.mol'
        else:
            opt_file = f'{file_prefix}{atom_count}_{idx}_opt.mol'
        in_cage = _furthest_pair(
            stk.BuildingBlock.init_from_molecule(
                stk_lig,
                functional_groups=fg_factory
            )
        )
        free = _furthest_pair(
            stk.BuildingBlock.init_from_file(
                opt_file,
                functional_groups=fg_factory
            )
        )
        delta_angles[lig_name] = abs(
            calculate_bite_angle(bb=in_cage)
            - calculate_bite_angle(bb=free)
        )
    return delta_angles
|
dfc367300b92561c8b167081121c90e5313187a1
| 3,636,571
|
import logging
import time
def wait_for_file_to_finish_writing(**args) -> tuple:
    """
    Sleep for ``config.BULK_IMPORT_WAIT`` seconds so a freshly written
    file can finish writing and unlock.

    This wait shouldn't be required but appears to help with larger files.

    :param args: keyword bag; must contain ``config`` with a numeric
        ``BULK_IMPORT_WAIT`` attribute.
    :return: ``(True, args)`` so the step can be chained in a pipeline.
    """
    config = args.get('config')
    # Lazy %-style args: the message is only formatted when INFO is enabled.
    logging.info(
        "waiting %s seconds for file to finish writing and unlock",
        config.BULK_IMPORT_WAIT,
    )
    time.sleep(config.BULK_IMPORT_WAIT)
    return True, args
|
915c5b159030c5860891e95f09ffc725f755c584
| 3,636,572
|
def get_max_id(connection, generic_sensor_type: str) -> int:
    """
    Return the maximum id of a given generic sensor type (0 when there are none).
    :param generic_sensor_type: "asset", "market", or "weather_sensor"
    """
    sensor_table = sa.Table(
        generic_sensor_type,
        sa.MetaData(),
        sa.Column("id", sa.Integer),
    )
    query = sa.select(
        [
            sa.sql.expression.func.max(
                sensor_table.c.id,
            )
        ]
    )
    # scalar() yields None when the table holds no rows.
    result = connection.execute(query).scalar()
    return 0 if result is None else result
|
11a35d9e43e7c403271675fd7b6207d6e16e0c80
| 3,636,573
|
def _temp_dict_file_name():
"""Name of the expected python dictionary as a json file from run_external_python().
.. versionadded:: 9.1
"""
return '__shared_dictionary__'
|
94f33562d775b041387b477d838a5efadfe38f00
| 3,636,574
|
def anisotropic_Gaussian(ksize=25, theta=np.pi, l1=6, l2=6):
    """
    Generate an anisotropic Gaussian kernel
    (adapted from https://github.com/cszn/KAIR/blob/master/utils/utils_sisr.py).

    Args:
        ksize : e.g., 25, kernel size
        theta : [0, pi], rotation angle range
        l1 : [0.1,50], scaling of eigenvalues
        l2 : [0.1,l1], scaling of eigenvalues
        If l1 = l2, an isotropic Gaussian kernel results.

    Returns:
        k : kernel
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # First eigenvector: the x unit vector rotated by theta.
    v = np.dot(np.array([[cos_t, -sin_t],
                         [sin_t, cos_t]]), np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    # Covariance from the eigendecomposition V D V^-1.
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
|
259ae1590807e11d5805c1065fed82acf430b60b
| 3,636,577
|
import warnings
def fakemag_to_parallax(fakemag, mag, fakemag_err=None):
    """
    To convert fakemag to parallax, Magic Number will be preserved
    :param fakemag: astroNN fakemag
    :type fakemag: Union[float, ndarray]
    :param mag: apparent magnitude
    :type mag: Union[float, ndarray]
    :param fakemag_err: Optional, fakemag_err
    :type fakemag_err: Union[NoneType, float, ndarray]
    :return: array of parallax in mas with astropy Quantity (with additional return of propagated error if fakemag_err is provided)
    :rtype: astropy Quantity
    :History: 2018-Aug-11 - Written - Henry Leung (University of Toronto)
    """
    fakemag = np.array(fakemag)
    mag = np.array(mag)
    # treat non-positive fakemag as MAGIC_NUMBER, check for magic number and negative fakemag
    magic_idx = ((fakemag == MAGIC_NUMBER) | (mag == MAGIC_NUMBER) | (fakemag <= 0.) | np.isnan(fakemag) | np.isnan(mag))
    with warnings.catch_warnings():  # suppress numpy Runtime warning caused by MAGIC_NUMBER
        warnings.simplefilter("ignore")
        # parallax [mas] = fakemag / 10^(0.2 * m): inverse of the fakemag definition
        parallax = fakemag / (10. ** (0.2 * mag))
    if fakemag.shape != ():  # check if its only 1 element
        parallax[magic_idx] = MAGIC_NUMBER
    else:  # for float
        # NOTE(review): `magic_idx == [1]` compares a 0-d bool array against [1];
        # it is truthy only when the scalar input was flagged -- confirm intent.
        parallax = MAGIC_NUMBER if magic_idx == [1] else parallax
    if fakemag_err is None:
        return parallax * u.mas
    else:
        with warnings.catch_warnings():  # suppress numpy Runtime warning caused by MAGIC_NUMBER
            warnings.simplefilter("ignore")
            # propagate relative error: sigma_p / p = sigma_fakemag / fakemag
            parallax_err = (fakemag_err / fakemag) * parallax
        if fakemag.shape != ():  # check if its only 1 element
            parallax_err[magic_idx] = MAGIC_NUMBER
        else:  # for float
            parallax_err = MAGIC_NUMBER if magic_idx == [1] else parallax_err
        return parallax * u.mas, parallax_err * u.mas
|
0086436f41707a74974d6358d101eacc3149777a
| 3,636,581
|
def provenance_stamp(routine):
    """Build a provenance dictionary satisfying QCSchema,
    https://github.com/MolSSI/QCSchema/blob/master/qcschema/dev/definitions.py#L23-L41
    crediting QCElemental as creator, with its version and the name of the
    generating `routine`.
    """
    stamp = {
        'creator': 'QCElemental',
        'version': get_versions()['version'],
        'routine': routine,
    }
    return stamp
|
34c1e11c69d0b0354e356bd0463a9f89cd438d51
| 3,636,582
|
def index_of(y):
    """
    Helper that produces x values to plot against when none are given.

    Uses ``y.index`` when available (e.g. for a pd.Series); otherwise
    falls back to ``np.arange(y.shape[0])``. This will be extended in
    the future to deal with more types of labeled data.

    Parameters
    ----------
    y : scalar or array-like
        The proposed y-value

    Returns
    -------
    x, y : ndarray
        The x and y values to plot.
    """
    try:
        return y.index.values, y.values
    except AttributeError:
        checked = _check_1d(y)
        return np.arange(checked.shape[0], dtype=float), checked
|
fae630e18bf20f1c9762e6c6f9d2d1b2f5cf93e2
| 3,636,583
|
def get_centered_box(center: np.ndarray, box_size: np.ndarray):
    """
    Return the (start, stop) corners of a box of size ``box_size``
    centered at ``center``.

    If ``box_size`` is odd, ``center`` will be closer to the right.
    """
    half = box_size // 2
    start = center - half
    # The remainder puts the extra cell of an odd-sized box on the right.
    stop = center + half + box_size % 2
    return start, stop
|
4d5ce84547281b27d8405894ce280139696329ba
| 3,636,584
|
def _make_context(frames, cameras):
    """
    Bundle frame and camera information into a Context named tuple.

    Args:
        - frames: frame information to store.
        - cameras: camera information to store.
    Returns: A Context named tuple encapsulating the given information.
    """
    context = Context(cameras=cameras, frames=frames)
    return context
|
b338795bf367c7e12b769fa33049e3e52a0daf00
| 3,636,585
|
def get_list_from_file(filename):
    """
    Return the containers stored in a file (one per line) as a list,
    skipping empty lines.
    """
    with open(filename) as handle:
        return [line for line in handle.read().splitlines() if line]
|
8d9a271aa4adea81f62bf74bb1d3c308870f1baf
| 3,636,587
|
def import_recipe():
    """Import recipe from base64 encoded text.

    Decodes a base64-encoded JSON payload submitted via the form and
    inserts the recipe plus its categories, ingredients and ownership
    rows, then redirects to the index. Re-renders the form with an
    error message when decoding fails.
    """
    form = Import()
    errors = None
    if form.validate_on_submit():
        encoded = request.form["encoded"]
        try:
            # Payload is base64-wrapped JSON: unwrap, then parse.
            decoded = loads(b64decode(encoded.encode("utf-8")).decode("utf-8"))
            # recipe table
            title = decoded["title"]
            servings = decoded["servings"]
            source = decoded["source"]
            notes = decoded["notes"]
            # directions are stored as a JSON string in a single column
            directions = dumps(decoded["directions"])
            db.execute("INSERT INTO recipes (title, servings, source, notes, directions) VALUES "
                       "(?, ?, ?, ?, ?)", (title, servings, source, notes, directions))
            # categories table
            # NOTE(review): looks up the new row by title, which assumes
            # titles are unique -- confirm against the schema.
            recipe_id = db.execute("SELECT recipe_id FROM recipes WHERE title = ?",
                                   (title,))[0]["recipe_id"]
            for category in decoded["categories"]:
                db.execute("INSERT INTO categories (category, recipe_id) VALUES (?, ?)",
                           (category, recipe_id))
            # ingredients table
            for ingredient in decoded["ingredients"]:
                db.execute("INSERT INTO ingredients (ingredient, recipe_id) VALUES (?, ?)",
                           (ingredient, recipe_id))
            # owners table
            db.execute("INSERT INTO owners (recipe_id, user_id) VALUES (?, ?)",
                       (recipe_id, session["user_id"]))
            flash("Recipe imported!")
            return redirect(url_for("index"))
        except TypeError:
            # Raised for malformed base64 / non-JSON payloads.
            errors = ["Invalid text."]
    return render_template("import.html", form=form, errors=errors)
|
7665af330f029c29e3cf5a2667204a3ba94409c2
| 3,636,588
|
def parse_xyz(filename, nbits):
    """Read xyz format point data and return bounding box, points and point data.

    Lines starting with '#' and blank lines are skipped; commas are
    treated as whitespace. The first three fields of each line are the
    x/y/z coordinates; any remaining fields are integer per-point data
    (only parsed when ``nbits > 0``).

    :param filename: path to the xyz text file.
    :param nbits: when > 0, per-point integer data columns are read.
    :return: (bbox, points, pointsdata) where bbox is
        [[minx, miny, minz], [maxx, maxy, maxz]] and pointsdata is None
        when nbits <= 0.
    """
    pointstrings = []
    with open(filename) as points_file:
        for line in points_file:
            if not line.startswith('#'):
                if not line.isspace():
                    line = line.replace(',', ' ')
                    line = line.split()
                    pointstrings.append(line)
    # np.float / np.int were removed in NumPy 1.24; use the builtins,
    # which NumPy maps to float64 / platform int as before.
    points = np.zeros((len(pointstrings), 3), dtype=float)
    if nbits > 0:
        datalen = len(pointstrings[0][3:])
        pointsdata = np.zeros((len(pointstrings), datalen), dtype=int)
    for idx, line in enumerate(pointstrings):
        coords = line[: 3]
        coords = [float(i) for i in coords]
        points[idx] = coords
        if nbits > 0:
            data = line[3:]
            data = [int(i) for i in data]
            pointsdata[idx] = data
    minvals = points.min(axis=0).tolist()
    maxvals = points.max(axis=0).tolist()
    bbox = [minvals, maxvals]
    if nbits > 0:
        return bbox, points, pointsdata
    else:
        return bbox, points, None
|
51a9f7f34bbae5eeddd8b97139ed59ec53e43939
| 3,636,589
|
def compare_vecs(est, truth, zero_tol=0):
    """
    Compare an estimated coefficient vector against the ground truth.

    Parameters
    ----------
    est: array-like
        The estimated vector.
    truth: array-like
        The true vector parameter.
    zero_tol: float
        Zero tolerance for declaring an element equal to zero.

    Output
    ------
    out: dict
        Dictionary containing various error measurements.
    """
    est = np.array(est).reshape(-1)
    truth = np.array(truth).reshape(-1)
    # Norms of the truth, floored at machine epsilon to avoid dividing by 0.
    eps = np.finfo(float).eps
    true_L2 = max(np.sqrt((truth ** 2).sum()), eps)
    true_L1 = max(abs(truth).sum(), eps)
    assert len(est) == len(truth)
    support_est = abs(est) > zero_tol
    support_true = abs(truth) > zero_tol
    n = len(est)
    resid = est - truth
    out = {}
    # Size of the residual, absolute and relative.
    out['L2'] = np.sqrt((resid ** 2).sum())
    out['L1'] = abs(resid).sum()
    out['L2_rel'] = out['L2'] / true_L2
    out['L1_rel'] = out['L1'] / true_L1
    out['MSE'] = out['L2'] / np.sqrt(n)
    out['MAE'] = out['L1'] / n
    out['max'] = abs(resid).max()
    # Support recovery metrics.
    out.update(compare_supports(support_est, support_true))
    out['support_auc'] = roc_auc_score(y_true=support_true,
                                       y_score=abs(est))
    # Compare signs after zeroing every entry declared off-support.
    est_masked = deepcopy(est)
    est_masked[~support_est] = 0
    truth_masked = deepcopy(truth)
    truth_masked[~support_true] = 0
    out['sign_error'] = np.mean(np.sign(est_masked) != np.sign(truth_masked))
    return out
|
ef977c31bbca818809f7d708d0ed6f754912239e
| 3,636,590
|
def accumulator(init, update):
    """
    Generic accumulator function.

    The type of the initial value determines the output type: when
    ``init`` is an int, the *length* of ``update`` is added to it;
    otherwise ``update`` itself is added/concatenated.

    .. code-block:: python
        >>> functools.reduce(accumulator, ['that'], 'this ')
        'this that'
        >>> functools.reduce(accumulator, ['Hello'], 5)
        10

    :param init: Initial Value
    :param update: Value to accumulate
    :return: Combined Values
    """
    if isinstance(init, int):
        return init + len(update)
    return init + update
|
6a4962932c8dba4d5c01aa8936787b1332a6323f
| 3,636,591
|
def process_po_folder(domain, folder, extra=''):
    """ Process each PO file in folder

    Compiles every .po translation catalogue found in `folder` into a
    .mo file named after `domain`, placed under a per-locale directory.
    Returns True when all files compiled without warnings.
    NOTE: Python 2 code (print statements).
    """
    result = True
    for fname in glob.glob(os.path.join(folder, '*.po')):
        basename = os.path.split(fname)[1]
        name = os.path.splitext(basename)[0]
        # Output directory: MO_DIR/<po-file name><MO_LOCALE suffix>.
        mo_path = os.path.normpath('%s/%s%s' % (MO_DIR, name, MO_LOCALE))
        mo_name = '%s.mo' % domain
        if not os.path.exists(mo_path):
            os.makedirs(mo_path)
        # Create the MO file
        mo_file = os.path.join(mo_path, mo_name)
        print 'Compile %s' % mo_file
        ret, output = run('%s %s -o "%s" "%s"' % (TOOL, extra, mo_file, fname))
        if ret != 0:
            # Non-zero exit: the compiler tool is most likely not installed.
            print '\nMissing %s. Please install this package first.' % TOOL
            exit(1)
        if 'WARNING:' in output:
            # Warnings do not abort the run but mark the batch as failed.
            print output
            result = False
    return result
|
c89a7952d9961ec096dac98f1d830a24b4d62ecd
| 3,636,592
|
def create_service(
    *,
    db_session: Session = Depends(get_db),
    service_in: ServiceCreate = Body(
        ...,
        example={
            "name": "myService",
            "type": "pagerduty",
            "is_active": True,
            "external_id": "234234",
        },
    ),
):
    """
    Create a new service.

    Rejects the request with HTTP 400 when a service with the same
    external id already exists in the target project.
    """
    # Uniqueness check: external_id must be unique within the project.
    service = get_by_external_id_and_project_name(
        db_session=db_session,
        external_id=service_in.external_id,
        project_name=service_in.project.name,
    )
    if service:
        raise HTTPException(
            status_code=400,
            detail=f"A service with this identifier ({service_in.external_id}) already exists.",
        )
    service = create(db_session=db_session, service_in=service_in)
    return service
|
890928f0a5b1a990ea27594886031bf6ede1a0db
| 3,636,593
|
import json
def get_handler(event, context):  # pylint: disable=unused-argument
    """REST API GET method to get data about a Minecraft game server.

    :param event: API Gateway proxy-integration event dict.
    :param context: Lambda context object (unused).
    :return: proxy-integration response dict with a JSON body.
    """
    # gather the server data.
    # `pathParameters` can be present with an explicit None value when the
    # route has no path parameters, so a dict default on .get() is not
    # enough -- coalesce None to {} before the inner lookup.
    name = (event.get('pathParameters') or {}).get('name')
    server = gather(name)
    # return the HTTP payload
    return {
        'statusCode': 200,
        'body': json.dumps(server)
    }
|
61326050cbac4ad3a7a727ebef01bd7e496a254c
| 3,636,597
|
def get_item(dataframe: DataFrame, col: str, new_col: str, index: any) -> DataFrame:
    """Return DF with a new column holding one item extracted from an array column.

    :param str col: name of the existing array column to read from
    :param str new_col: name of the new column to create
    :param any index: index (or key) of the element to extract
    Examples:
    ```
    SectionName:
    Type: transform::generic
    Input: InputBlock
    Properties:
    Functions:
    - get_item:
    col: name
    new_col: firstname
    index: 2
    ```
    """
    # Fails fast with a clear error when `col` is missing from the schema.
    _validate_column_exists(dataframe, col)
    return dataframe.withColumn(new_col, F.col(col).getItem(index))
|
e06090dad60f7522b1727d69926994bb94f669d6
| 3,636,598
|
from typing import Union
from re import T
from typing import Sequence
def inject(
    dependency: Union[T, str],
    *,
    namespace: str = None,
    group: str = None,
    exclude_groups: Sequence[str] = None,
    lazy: bool = False,
    optional: bool = False,
) -> T:
    """
    Injects the requested dependency by instantiating a new instance of it or a
    singleton instance if specified by the injectable. Returns an instance of the
    requested dependency.
    One can use this method directly for injecting dependencies though this is not
    recommended. Use the :meth:`@autowired <injectable.autowired>` decorator and the
    :class:`Autowired <injectable.Autowired>` type annotation for dependency injection
    to be automatically wired to a function's call instead.
    Will log a warning indicating that the injection container is empty when invoked
    before :meth:`load_injection_container <injectable.load_injection_container>` is
    called.
    Raises
    :class:`InjectionError <injectable.errors.InjectionError>`
    when unable to resolve the requested dependency. This can be due to a variety of
    reasons: the requested dependency wasn't loaded into the container; the namespace
    isn't correct; the group isn't correct; there are multiple injectables for the
    dependency and none or multiple are marked as primary. When parameter ``optional``
    is ``True`` no error will be raised when no injectable that matches requested
    qualifier/class and group is found in the specified namespace though in ambiguous
    cases that resolving a primary injectable is impossible an error will still be
    raised.
    :param dependency: class, base class or qualifier of the dependency to be used for
    lookup among the registered injectables.
    :param namespace: (optional) namespace in which to look for the dependency. Defaults
    to :const:`injectable.constants.DEFAULT_NAMESPACE`.
    :param group: (optional) group to filter out other injectables outside of this
    group. Defaults to None.
    :param exclude_groups: (optional) list of groups to be excluded. Defaults to None.
    :param lazy: (optional) when True will return an instance which will automatically
    initialize itself when first used but not before that. Defaults to False.
    :param optional: (optional) when True this function returns None if no injectable
    matches the qualifier/class and group inside the specified namespace instead
    of raising an :class:`InjectionError <injectable.errors.InjectionError>`.
    Ambiguous cases where resolving a primary injectable is impossible will
    still raise :class:`InjectionError <injectable.errors.InjectionError>`.
    Defaults to False.
    Usage::
    >>> from foo import Foo
    >>> from injectable import inject
    >>>
    >>> class Bar:
    ...     def __init__(self, foo: Foo = None):
    ...         self.foo = foo or inject(Foo)
    """
    # Resolve the lookup key (class name or qualifier string) and which
    # registry (class vs. qualifier) it lives in.
    dependency_name = get_dependency_name(dependency)
    registry_type = get_dependency_registry_type(dependency)
    matches = get_namespace_injectables(
        dependency_name, registry_type, namespace or DEFAULT_NAMESPACE
    )
    if not matches:
        if not optional:
            raise InjectionError(
                f"No injectable matches {registry_type.value} '{dependency_name}'"
            )
        return None
    # Group filtering is skipped entirely when neither filter is supplied.
    if group is not None or exclude_groups is not None:
        matches = filter_by_group(matches, group, exclude_groups)
        if not matches:
            if not optional:
                raise InjectionError(
                    f"No injectable for {registry_type.value} '{dependency_name}'"
                    f" matches group '{group}'"
                )
            return None
    # May still raise InjectionError on ambiguity even when `optional` is True.
    injectable = resolve_single_injectable(dependency_name, registry_type, matches)
    return injectable.get_instance(lazy=lazy)
|
ba2524875d13c388e157c9994ae4c380aafe9e52
| 3,636,600
|
import logging
def load2(file, collapsed=True, index=None):
    """Loads Laue diffraction data.

    :param file: dict describing the source ('stacked', 'ext', 'path',
        'range', 'h5', 'threshold', 'frame' keys).
    :param collapsed: when True, collapse the time series down to the
        pixels selected in `index`.
    :param index: pre-selected pixel index; picked automatically from
        the data when None.
    :return: (vals, index)
    """
    if file['stacked'] is True:
        files = loadstack(file)
        if file['ext'] == 'h5':
            vals = loadh5files(files, file['h5']['key'])
    else:
        if file['ext'] == 'h5':
            begin, end, step = file['range']
            # Fixed slice order: Python slices are [start:stop:step]; the
            # original [begin:step:end] used `step` as the stop bound.
            vals = loadh5(file['path'], file['h5']['key'])[begin:end:step]
            vals = np.swapaxes(vals, 0, 2)
            vals = np.swapaxes(vals, 0, 1)
            vals = vals.copy()
    if index is None:
        index = cherrypickpixels(vals, file['threshold'], file['frame'])
    if collapsed is True:
        vals = collapse(vals, index)
        datasize = vals.shape[0] * vals.shape[1] * 4e-6  # [MB]
    else:
        datasize = vals.shape[0] * vals.shape[1] * vals.shape[2] * 4e-6  # [MB]
    logging.info(
        "Data size: {}, {:.2f} MB".format(
            vals.shape, datasize))
    return vals, index
|
f066d6e738dfd2ae4b503f8416fc5df6384f7a5d
| 3,636,602
|
import logging
def get_api_user(name):
    """
    Check if the user is registered on faceit.
    :returns 1 Ok
    :returns None nOk
    """
    try:
        logging.info("get_api_data_user")
        details = FaceitData(FACEIT_API).player_details(name)
        # Truthy details mean the player exists on faceit.
        if details:
            return 1
    except ValueError:
        logging.error("Faceit Name is not correct !")
        return None
|
16643b499162226d65f217d2bdc9c78f64424507
| 3,636,603
|
def get_storage_client():
    """Return storage client.

    Lazily creates a single module-level ``storage.Client`` on first
    call and reuses it afterwards (simple singleton cache).
    """
    global _client
    if not _client:
        _client = storage.Client()
    return _client
|
fe54dd0c0f922a2b6413cab792feda9313e15e02
| 3,636,604
|
def upload_file_to_s3(image, fileStoreObj, acl="public-read"):
    """S3 file uploader.

    Uploads `image` to the configured S3 bucket under the file-store
    object's filename and returns the resulting URL string.

    :param image: file body/content to upload.
    :param fileStoreObj: object exposing `content_type` and `filename`
        attributes -- presumably a werkzeug FileStorage; TODO confirm.
    :param acl: canned ACL applied to the uploaded object.
    :return: URL string on success. NOTE(review): on failure the caught
        exception object itself is returned -- callers must type-check.
    """
    app = current_app._get_current_object()
    s3 = boto3.client(
        "s3",
        aws_access_key_id=app.config['S3_KEY'],
        aws_secret_access_key=app.config['S3_SECRET']
    )
    try:
        s3.put_object(Body=image,
                      Bucket=app.config['S3_BUCKET'],
                      ACL=acl,
                      ContentType=fileStoreObj.content_type,
                      Key=fileStoreObj.filename)
    except Exception as e:
        print("An Error occurred: ", e)
        return e
    return "{}{}".format(app.config["S3_LOCATION"],
                         fileStoreObj.filename)
|
f8505858fa341cc9d7420cc04817c9169f10d182
| 3,636,605
|
def vae_bc(
    transitions=None,
    # Adam optimizer settings
    lr_enc=1e-3,
    lr_dec=1e-3,
    # Training settings
    minibatch_size=100,
):
    """
    VAE Behavioral Cloning (VAE-BC) control preset
    Args:
        transitions:
            dictionary of transitions generated by cpprb.ReplayBuffer.get_all_transitions()
        lr_enc (float): Learning rate for the encoder.
        lr_dec (float): Learning rate for the decoder.
        minibatch_size (int): Number of experiences to sample in each training update.
    Returns:
        A factory function that, given an environment, builds the VaeBC agent.
    """
    def _vae_bc(env):
        disable_on_policy_mode()
        device = get_device()
        # Latent dimension is twice the action dimension (BCQ convention).
        latent_dim = env.action_space.shape[0] * 2
        encoder_model = fc_bcq_encoder(env, latent_dim=latent_dim).to(device)
        encoder_optimizer = Adam(encoder_model.parameters(), lr=lr_enc)
        encoder = BcqEncoder(
            model=encoder_model,
            latent_dim=latent_dim,
            optimizer=encoder_optimizer,
            name="encoder",
        )
        decoder_model = fc_bcq_decoder(env, latent_dim=latent_dim).to(device)
        decoder_optimizer = Adam(decoder_model.parameters(), lr=lr_dec)
        decoder = BcqDecoder(
            model=decoder_model,
            latent_dim=latent_dim,
            space=env.action_space,
            optimizer=decoder_optimizer,
            name="decoder",
        )
        # Offline setting: pre-fill the replay buffer from the supplied
        # transitions; samples are staged on CPU before storing.
        replay_buffer = ExperienceReplayBuffer(1e7, env)
        if transitions is not None:
            samples = replay_buffer.samples_from_cpprb(
                transitions, device="cpu")
            replay_buffer.store(samples)
        set_replay_buffer(replay_buffer)
        return VaeBC(
            encoder=encoder,
            decoder=decoder,
            minibatch_size=minibatch_size,
        )
    return _vae_bc
|
c447dba04864b6eb8a9312135cd656e13a40c971
| 3,636,606
|
import itertools
import six
def get_mode_fn(num_gpus, variable_strategy, num_workers):
    """Returns a function that will build shadownet model.

    The returned Estimator model_fn performs multi-tower training: one
    model replica per GPU, with gradients averaged on a consolidation
    device before being applied.
    """
    def _mode_fun(features, labels, mode, params):
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        tower_features = features
        # Per-tower targets: score maps, geometry maps and training masks.
        tower_score_maps = labels[0]
        tower_geo_maps = labels[1]
        tower_training_masks = labels[2]
        tower_losses = []
        tower_gradvars = []
        tower_summaries = []
        num_devices = FLAGS.num_gpus
        device_type = 'gpu'
        reuse_variables = None
        for i in range(num_devices):
            worker_device = '/{}:{}'.format(device_type, i)
            device_setter = local_device_setter(worker_device=worker_device)
            with tf.name_scope('tower_%d' % i) as name_scope:
                with tf.device(device_setter):
                    total_loss, gradvars, summaries = _tower_fn(
                        is_training,
                        tower_features[i],
                        tower_score_maps[i],
                        tower_geo_maps[i],
                        tower_training_masks[i],
                        reuse_variables)
                    tower_losses.append(total_loss)
                    tower_gradvars.append(gradvars)
                    tower_summaries.append(summaries)
                    # Towers after the first reuse the same variables.
                    reuse_variables = True
                    if i == 0:
                        # Only trigger batch_norm moving mean and variance update from
                        # the 1st tower. Ideally, we should grab the updates from all
                        # towers but these stats accumulate extremely fast so we can
                        # ignore the other stats from the other towers without
                        # significant detriment.
                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                       name_scope)
        # Now compute global loss and gradients.
        gradvars = []
        with tf.name_scope('gradient_averaging'):
            all_grads = {}
            # Group every tower's gradient by the variable it belongs to.
            for grad, var in itertools.chain(*tower_gradvars):
                if grad is not None:
                    all_grads.setdefault(var, []).append(grad)
            for var, grads in six.iteritems(all_grads):
                # Average gradients on the same device as the variables
                with tf.device(var.device):
                    if len(grads) == 1:
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
                gradvars.append((avg_grad, var))
        if FLAGS.pretrained_model_path is not None:
            # Warm-start the ResNet-50 backbone variables from a checkpoint.
            tf.train.init_from_checkpoint(FLAGS.pretrained_model_path, {"resnet_v1_50/":"resnet_v1_50/"})
            # restore only once
            FLAGS.pretrained_model_path = None
        # Device that runs the ops to apply global gradient updates.
        consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
        with tf.device(consolidation_device):
            global_step = tf.train.get_global_step()
            starter_learning_rate = FLAGS.learning_rate
            learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                                       FLAGS.decay_steps, FLAGS.decay_rate,
                                                       staircase=True)
            loss = tf.reduce_mean(tower_losses, name='loss')
            tensors_to_log = {'global_step': global_step, 'learning_rate': learning_rate, 'loss': loss}
            logging_hook = tf.train.LoggingTensorHook(
                tensors=tensors_to_log, every_n_iter=10)
            # NOTE(review): summary output dir '/data/output/' is hard-coded
            # -- confirm this path exists in the deployment environment.
            summary_hook = tf.train.SummarySaverHook(
                save_steps=10,
                output_dir='/data/output/',
                summary_op=tower_summaries[0])
            train_hooks = [logging_hook, summary_hook]
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            if FLAGS.sync:
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers)
                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
                train_hooks.append(sync_replicas_hook)
            # save moving average
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
            variables_averages_op = variable_averages.apply(tf.trainable_variables())
            # Create single grouped train op
            train_op = [
                optimizer.apply_gradients(
                    gradvars, global_step=tf.train.get_global_step()),
                variables_averages_op
            ]
            train_op.extend(update_ops)
            train_op = tf.group(*train_op)
            return tf.estimator.EstimatorSpec(
                mode=mode,
                loss=loss,
                train_op=train_op,
                training_hooks=train_hooks)
    return _mode_fun
|
17f3465485ef5388fcacd0c8aeeac9ff4eb89651
| 3,636,607
|
def link_to_profile(request):
    """
    If the user is a temporary one who was logged in via
    an institution (not through a Uniauth profile), offers
    them the choice between logging to an existing Uniauth
    account or creating a new one.
    The institution account is (eventually) linked to the
    Uniauth profile the user logged into / created.
    """
    next_url = request.GET.get('next')
    context = _get_global_context(request)
    if not next_url:
        next_url = get_redirect_url(request)
    params = urlencode({'next': next_url})
    context['next_url'] = next_url
    # If the user is not authenticated at all, redirect to login page
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('uniauth:login') + '?' + params)
    # If the user is already authenticated + verified, proceed to next page
    if not is_tmp_user(request.user) and not is_unlinked_account(request.user):
        return HttpResponseRedirect(next_url)
    # If the user is temporary, but was not logged in via an institution
    # (e.g. created through Uniauth, but not verified), redirect to signup
    if not is_unlinked_account(request.user):
        return HttpResponseRedirect(reverse('uniauth:signup') + '?' + params)
    # At this point, we've ensured the user is temporary and was
    # logged in via an institution. We just need to handle the
    # Login Form, if the user chooses to link to an existing account.
    # If it's a POST request, attempt to validate the form
    if request.method == "POST":
        form = LoginForm(request, request.POST)
        # Authentication successful
        if form.is_valid():
            unlinked_user = request.user
            # username_split: presumably (prefix, institution slug,
            # institution user id) -- verify against the account scheme.
            username_split = get_account_username_split(request.user.username)
            # Log in as the authenticated Uniauth user
            user = form.get_user()
            auth_login(request, user)
            # Merge the unlinked account into the logged in profile,
            # then add the institution account described by the username
            merge_model_instances(user, [unlinked_user])
            _add_institution_account(user.uniauth_profile, username_split[1],
                                     username_split[2])
            slug = username_split[1]
            context['institution'] = Institution.objects.get(slug=slug)
            return render(request, 'uniauth/link-success.html', context)
        # Authentication failed: render form errors
        else:
            context['form'] = form
            return render(request, 'uniauth/link-to-profile.html', context)
    # Otherwise, render a blank Login form
    else:
        form = LoginForm(request)
        context['form'] = form
        return render(request, 'uniauth/link-to-profile.html', context)
|
63e8bfa1e226cf2247e01e0fa5c6aa30365256bc
| 3,636,608
|
from pathlib import Path
def obtain_fea_im_subset(Row_range, Col_range, tsList, ts_stack_foler, fC_hdr, fC_img, bandName):
    """
    Build a (rows, cols, features, time) feature stack for a spatial subset.

    Reads each band of each time step from ENVI files, crops the given
    1-based row/column ranges, fills missing time steps with NaN (later
    interpolated), and appends one derived feature (band1 - band0).

    fea_im_subset = eng.zeros(int(tsLen),int(num_fea),int(d2),int(d1)) # this is matlab.double type
    to convert matlab.double to ndarray
    For one-dimensional arrays, access only the "_data" property of the Matlab array.
    For multi-dimensional arrays you need to reshape the array afterwards.
    np.array(x._data).reshape(x.size[::-1]).T
    """
    logger.info("obtain_fea_im_subset")
    # Row_range / Col_range are 1-based inclusive bounds.
    d1 = Row_range[1] - Row_range[0] + 1
    d2 = Col_range[1] - Col_range[0] + 1
    num_band = len(bandName)
    # One extra feature slot for the derived band difference.
    num_fea = num_band + 1
    tsLen = len(tsList)
    dim_fea = (d1, d2, num_fea, tsLen)
    fea_im_subset = np.zeros(dim_fea)
    FILL = 0
    for t in range(0, tsLen):
        for i in range(0, num_band):
            hdrPath_t_i = ts_stack_foler + "/" + fC_hdr[bandName[i]][t]
            imgPath_t_i = ts_stack_foler + "/" + fC_img[bandName[i]][t]
            # print(imgPath_t_i)
            # --------------------Check file existance--------------------------
            if Path(hdrPath_t_i).is_file():
                # logger.info(hdrPath_t_i)
                info = envi.read_envi_header(hdrPath_t_i)
                img = envi.open(hdrPath_t_i)
                img_open = img.open_memmap(writeable=True)
                # Convert the 1-based inclusive ranges to 0-based slices.
                im_t_i = img_open[Row_range[0] - 1:Row_range[1], Col_range[0] - 1:Col_range[1], 0]
                # print(im_t_i.shape)
                # im_t_i = np.copy(img_open[:Row_range[1]+1,:Col_range[1],0])
            else:
                logger.info("The -%d-th TS is empty!!!" % t)
                print("The -%d-th TS is empty!!!" % t)
                # Missing time step: fill with NaN and interpolate later.
                im_nan = np.zeros((d1, d2))
                im_nan[im_nan == 0] = np.nan
                im_t_i = im_nan
                FILL = 1
            # fea_im_subset[t][i][:][:]
            fea_im_subset[:, :, i, t] = im_t_i
    if FILL == 1:
        # NOTE(review): np.interp operates on the flattened 4-D array here,
        # so gaps are filled from flat-index neighbours rather than along
        # the time axis -- confirm this matches the intended behaviour
        # (the Matlab original used a spline fill).
        nans, x = np.isnan(fea_im_subset), lambda z: z.nonzero()[0]
        fea_im_subset[nans] = np.interp(x(nans), x(~nans),
                                        fea_im_subset[~nans])  # linear Interpolation, in Matlab: Cubic spline
        # fea_im_subset = eng.fillmissing(fea_im_subset,"spline");
    for t in range(0, tsLen):
        # Calculate additional features ---------------------------------------
        fea_im_subset[:, :, num_band, t] = fea_im_subset[:, :, 1, t] - fea_im_subset[:, :, 0, t]
    return fea_im_subset
|
257156c63d3ea06f53ed904dcddcc3c60bf9d937
| 3,636,609
|
def himmelblau(individual):
    # Raw docstring: the :math: markup below uses backslash sequences
    # (\m, \i, ...) that are invalid escapes in a normal string literal
    # (SyntaxWarning, and an error in future Python versions).
    r"""The Himmelblau's function is multimodal with 4 defined minimums in
    :math:`[-6, 6]^2`.
    .. list-table::
    :widths: 10 50
    :stub-columns: 1
    * - Type
    - minimization
    * - Range
    - :math:`x_i \in [-6, 6]`
    * - Global optima
    - :math:`\mathbf{x}_1 = (3.0, 2.0)`, :math:`f(\mathbf{x}_1) = 0`\n
    :math:`\mathbf{x}_2 = (-2.805118, 3.131312)`, :math:`f(\mathbf{x}_2) = 0`\n
    :math:`\mathbf{x}_3 = (-3.779310, -3.283186)`, :math:`f(\mathbf{x}_3) = 0`\n
    :math:`\mathbf{x}_4 = (3.584428, -1.848126)`, :math:`f(\mathbf{x}_4) = 0`\n
    * - Function
    - :math:`f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2`
    .. plot:: code/benchmarks/himmelblau.py
    :width: 67 %
    """
    # Single-element tuple return: DEAP fitness convention.
    return (individual[0] * individual[0] + individual[1] - 11)**2 + \
        (individual[0] + individual[1] * individual[1] - 7)**2,
|
2fcf348e01f33a54d847dfc7f9a225ed043e36a4
| 3,636,610
|
def get_deb_architecture():
    """
    Returns the deb architecture of the local system, e.g. amd64, i386, arm
    """
    # Runs `dpkg --print-architecture` via fabric's local() and captures stdout.
    return local('dpkg --print-architecture', capture=True)
|
a2c4ac9845bb395210043b8ebd7447596203a55b
| 3,636,611
|
def Ry(angle, degrees=False):
    """Generate the :math:`3\\times3` rotation matrix :math:`R_y(\\theta)`
    for a rotation about the :math:`y` axis by an angle :math:`\\theta`.

    Parameters
    ----------
    angle : float
        The rotation angle :math:`\\theta` in *radians* unless
        `degrees=True`, in which case it is converted first.
    degrees : bool, optional
        if `True`, then `angle` is converted from degrees to radians.

    Returns
    -------
    :class:`~numpy:numpy.ndarray`
        The :math:`3\\times3` matrix
        [[cos t, 0, sin t], [0, 1, 0], [-sin t, 0, cos t]],
        with entries of magnitude at most machine epsilon snapped to 0.

    Examples
    --------
    >>> import numpy as np
    >>> from sknano.core.math import Ry
    >>> np.alltrue(Ry(np.pi/4) == Ry(45, degrees=True))
    True
    """
    theta = np.radians(angle) if degrees else angle
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rotation = np.array([[cos_t, 0.0, sin_t],
                         [0.0, 1.0, 0.0],
                         [-sin_t, 0.0, cos_t]])
    # Snap numerically-negligible entries to exactly zero.
    rotation[np.where(np.abs(rotation) <= np.finfo(float).eps)] = 0.0
    return rotation
|
e10d95eb36051da7e87540ebe24a02c092861b36
| 3,636,612
|
def zhongzhuang_adjustment_reservoir():
    """
    Current stored volume (m3) of the ZhongZhuang adjustment reservoir.

    Real Name: ZhongZhuang Adjustment Reservoir
    Original Eqn: INTEG ( IF THEN ELSE(Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir+ZhongZhuang Adjustment Reservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount>=5.05e+006, 0 , Transfer From ZhongZhuangWeir To ZhongZhuangAdjustmentReservoir\ -Transfer From ZhongZhuangAdjustmentReservoir To BanXinWPP-Transfer From ZhongZhuangAdjustmentReservoir To DaNanWPP\ -ZhongZhuangAdjustmentReservoir Transfer Loss Amount ), 5.05e+006)
    Units: m3
    Limits: (None, None)
    Type: component
    Max Storage Valume = 5050000 m^3 (2017); general output = 24000 m^3 per day(BanXin WPP assumes 15000 m^3; DaNan WPP assumes
    9000 m^3 ; overflow height 68m, designed flood discharge 2.83CMS, water input limit
    10cms.
    """
    # Auto-generated Vensim-to-PySD component: the integration state itself
    # lives in the generated integ_* stateful object; this is just the accessor.
    return integ_zhongzhuang_adjustment_reservoir()
|
1fcc4a00015a3b8c1dadfc414455eab626704366
| 3,636,615
|
import requests
import random
def _request_esi_status() -> requests.Response:
    """Make a request to ESI about the current status, with retries.

    Retries up to ``max_retries`` times on transient gateway errors
    (HTTP 502/503/504) with randomized exponential backoff.

    NOTE(review): on a network-level failure this returns an offline
    ``EsiStatus`` instead of a ``requests.Response`` — the return annotation
    only covers the success path; callers must handle both types.
    """
    max_retries = 3
    retries = 0
    while True:
        try:
            r = requests.get(
                "https://esi.evetech.net/latest/status/",
                timeout=(5, 30),  # (connect, read) timeouts in seconds
                headers={"User-Agent": f"{__package__};{__version__}"},
            )
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
            # Network unreachable / timed out: report ESI as offline rather
            # than raising, so callers get a uniform status object.
            logger.warning("Network error when trying to call ESI", exc_info=True)
            return EsiStatus(
                is_online=False, error_limit_remain=None, error_limit_reset=None
            )
        # Only transient gateway errors are retried; any other status code
        # (success or otherwise) is returned to the caller as-is.
        if r.status_code not in {
            502,  # HTTPBadGateway
            503,  # HTTPServiceUnavailable
            504,  # HTTPGatewayTimeout
        }:
            break
        else:
            retries += 1
            if retries > max_retries:
                break
            else:
                logger.warning(
                    "HTTP status code %s - Retry %s/%s",
                    r.status_code,
                    retries,
                    max_retries,
                )
                # Backoff grows as ~0.1s * (2..4)^(retries-1); the random base
                # spreads retry storms from concurrent workers.
                wait_secs = 0.1 * (random.uniform(2, 4) ** (retries - 1))
                sleep(wait_secs)
    return r
|
39a74a766f77e66ae0fa33ac62b9ad09698a83be
| 3,636,616
|
def RunExampleConsumer(serialized_file_graph):
    """Runs the example consumer on the serialized_file_graph.

    Args:
      serialized_file_graph: mojom_files.MojomFileGraph as output by the mojom
        parser.

    Returns:
      The integer exit code of the example consumer.
    """
    examples_dir = os.path.dirname(os.path.abspath(__file__))
    example_consumer = os.path.join(examples_dir, 'example_consumer.go')
    src_root = os.path.abspath(os.path.join(examples_dir, '../../..'))
    # Extend rather than replace the environment: a bare {'GOPATH': ...} dict
    # would drop PATH and the 'go' binary could fail to resolve.
    environ = dict(os.environ, GOPATH=os.path.dirname(src_root))
    # Parenthesized print so the module parses under both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print(environ)
    cmd = ['go', 'run', example_consumer]
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE, env=environ)
    # communicate() feeds the graph on stdin and waits for the process.
    process.communicate(serialized_file_graph)
    return process.wait()
|
42fbc45f1e3ace3f1774ac631cef9811f01a2915
| 3,636,617
|
from typing import Callable
from typing import Any
def linnworks_api_session(func: Callable) -> Callable:
    """Decorator: run *func* inside a Linnworks API session.

    Args:
        func: Callable to wrap.

    Returns:
        A wrapper that opens a LinnworksAPISession for the duration of the
        call and forwards all arguments and the return value unchanged.
    """
    import functools  # local import keeps this block self-contained

    # functools.wraps preserves func's __name__/__doc__/__wrapped__ so the
    # decorated function is introspectable (the original wrapper lost them).
    @functools.wraps(func)
    def wrapper_linnapi_session(*args: Any, **kwargs: Any) -> Any:
        with LinnworksAPISession():
            return func(*args, **kwargs)
    return wrapper_linnapi_session
|
fb94e4f0ed1e0477e5b058c6adfd48712b7bfb12
| 3,636,618
|
def series_quat2euler(q0, q1, q2, q3, msg_name=""):
    """Convert pandas quaternion series q0..q3 into roll/pitch/yaw series.

    Arguments:
        q0, q1, q2, q3 -- pandas Series holding the quaternion components
    Keyword arguments:
        msg_name -- prefix for the names of the returned series (default "")

    Returns (roll, pitch, yaw) as pandas Series indexed like q0.
    """
    # Convert each quaternion sample; tf.quat2euler yields (yaw, pitch, roll).
    euler = [tf.quat2euler([a, b, c, d]) for a, b, c, d in zip(q0, q1, q2, q3)]
    yaw_vals, pitch_vals, roll_vals = np.array(euler).T
    named = {
        label: pd.Series(name=msg_name + label, data=vals, index=q0.index)
        for label, vals in (("yaw", yaw_vals),
                            ("pitch", pitch_vals),
                            ("roll", roll_vals))
    }
    return named["roll"], named["pitch"], named["yaw"]
|
bab831238025584a275595a8b6c038f52704047e
| 3,636,619
|
def _ShiftRight(x0, xs):
    """Shifts xs[:-1] one step to the right and attaches x0 on the left.

    Args:
      x0: element to prepend (shaped like a single element of xs).
      xs: tensor whose leading axis is shifted; its last element is dropped.

    Returns:
      A tensor shaped like xs: [x0, xs[0], ..., xs[-2]].
    """
    # [[x0]] adds the leading axis so x0 can be concatenated with xs[:-1].
    return tf.concat([[x0], xs[:-1]], axis=0)
|
9e6936432e4a7b7317560d6b2e32ac8a576d2d22
| 3,636,620
|
def compute_connected_components(self, compute_nx=True, probed_node=None, comps_to_merge=None, current_norm_vals=None):
    """
    Computes the NORMALIZED connected components of the network.
    If compute_nx is True, actually computes components from scratch using networkx.
    Otherwise, we update self.connected_components, self.connected_component_sizes, and
    self.components based on comps_to_merge (incremental merge into probed_node's
    component after a probe).
    UPDATE: As of 7/25, we now keep track of node_component_sizes, i.e. the size of the
    component each node is in. This is managed in BOTH update_neighbors AND
    compute_connected_components. It is an np array with entries corresponding to rows in the
    feature matrix.
    Returns np array of normalized component sizes (one entry per node row),
    where each value is (size - min_size) / (max_size - min_size), or all-ones
    when every node is in a same-sized component.
    """
    if compute_nx:
        # compute the nx components
        self.connected_components = {k:c for k, c in enumerate(nx.connected_components(self.G))}
        connected_components = self.connected_components
        self.components = np.zeros((len(self.node_to_row)), dtype=int)
        self.connected_component_sizes = dict()
        self.node_component_sizes = np.zeros((len(self.node_to_row)))
        # initialize min/max
        min_component_size = float('inf')
        max_component_size = 0
        # loop sets self.connected_component_sizes, self.components, max/min
        for i, component in self.connected_components.items():
            size = len(component)
            self.connected_component_sizes[i] = size
            if size < min_component_size:
                min_component_size = size
            if size > max_component_size:
                max_component_size = size
            for node in component:
                self.components[self.node_to_row[node]] = i
                self.node_component_sizes[self.node_to_row[node]] = size
    else:
        # No need to compute in this case
        connected_components = self.connected_components
        # For each component:
        min_component_size = self.min_comp_size
        max_component_size = self.max_comp_size
        probed_comp = self.components[self.node_to_row[probed_node]]
        if comps_to_merge is None:
            comps_to_merge = []
        for comp in comps_to_merge:
            # Keep probed_node's component (arbitrary choice),
            # add all of each other component's nodes + size to probed component
            self.connected_components[probed_comp].update(self.connected_components[comp])
            self.connected_component_sizes[probed_comp] = len(self.connected_components[probed_comp])
            for node in self.connected_components[comp]:
                self.components[self.node_to_row[node]] = probed_comp
            # pop the old component from the dictionaries
            self.connected_components.pop(comp)
            self.connected_component_sizes.pop(comp)
        # update probed component size across the board
        self.node_component_sizes[np.where(self.components == probed_comp)] = self.connected_component_sizes[probed_comp]
        # If the min/max size changed, will need to recompute normalized value for ALL
        # components, rather than just the probed node's component.
        new_min = min(self.connected_component_sizes.values())
        if self.connected_component_sizes[probed_comp] > max_component_size:
            new_max = self.connected_component_sizes[probed_comp]
        else:
            new_max = max_component_size
        if new_min != min_component_size or new_max != max_component_size:
            compute_nx = True # NOTE re-using this flag is a bit adhoc, but it works
            min_component_size = new_min
            max_component_size = new_max
    # Cache the extremes for the next incremental call.
    self.max_comp_size = max_component_size
    self.min_comp_size = min_component_size
    diff = float(self.max_comp_size - self.min_comp_size)
    # Recompute normalization
    if compute_nx:
        # if there's more than one component, compute the normalized values
        if len(connected_components) > 1 and diff > 0:
            # Calculate (mycomponent-min_component) / (max_component-min_component)
            return (self.node_component_sizes - self.min_comp_size) / diff
        else: # otherwise, everyone is in the same sized component
            return np.ones(len(self.node_to_row.keys()))
    else:
        # Fast path: min/max unchanged, so only the probed component's
        # normalized value needs updating inside current_norm_vals.
        if diff > 0:
            new_val = float(self.connected_component_sizes[probed_comp]-self.min_comp_size)/diff
        else:
            new_val = 1.0
        current_norm_vals[np.where(self.components == probed_comp)] = new_val
        return current_norm_vals
|
de4c60c590e4444e15189ac59883bf2535f1c510
| 3,636,621
|
import json
def search_salary(request):
    """Handle a GET salary-search request from the search API.

    Expects a JSON body with 'title' and 'location' (either "City" or
    "City,State"). Returns a JSON HttpResponse with the search results,
    400 on bad input, or 405 for non-GET methods.
    """
    logger.info("Received a salary request {}".format(request.method))
    if request.method == 'GET':
        try:
            request_json_body = json.loads(request.body)
            title = request_json_body['title']
            location = request_json_body['location']
            logger.info("The request has title {} and location {}".format(title, location))
        except (ValueError, KeyError, TypeError):
            # ValueError covers malformed JSON (JSONDecodeError subclasses it);
            # KeyError/TypeError cover missing fields or a non-dict body.
            # (The original bare `except:` also swallowed SystemExit etc.)
            response_body = {'Error': 'Bad Request'}
            logger.info("Wrong parameters have been passed: {}".format(request.body))
            return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
        if ',' in location:
            city, state = location.split(',')
        else:
            city = location
            try:
                state = city_to_state_dict[city.title()]
            except KeyError:
                response_body = {'Error': 'Did you spell the city name correctly? '
                                          'Could you use the closest metropolitan city? '
                                          'Could you add the state name?'}
                logger.info("Wrong city name {} ".format(request.body))
                return HttpResponse(status=400, content=json.dumps(response_body), content_type='application/json')
        state_abbreviation = us_states[state.lower().strip()]
        query_body = query_builder(title=title, city=city, state=state_abbreviation)
        response = search_in_es(index_name=salary_index_name, query_body=query_body)
        response_body = parse_build_response(response)
        status = 200
    else:
        logger.info("WRONG METHOD for salary request {}".format(request.method))
        status = 405
        response_body = {'Error': 'Method Not Allowed'}
    return HttpResponse(status=status, content=json.dumps(response_body), content_type='application/json')
|
72de0064dbfad0e4692c2c0ffb240111432fc8e8
| 3,636,623
|
def get_wordnet_pos(treebank_tag):
    """Translate a Penn TreeBank PoS tag into the WordNet PoS constant.

    Returns None for tags with no WordNet equivalent.
    """
    # TreeBank tags group by first letter: J=adjective, V=verb, N=noun, R=adverb.
    for prefix, wn_pos in (('J', wordnet.ADJ),
                           ('V', wordnet.VERB),
                           ('N', wordnet.NOUN),
                           ('R', wordnet.ADV)):
        if treebank_tag.startswith(prefix):
            return wn_pos
    return None
|
b559b767ffa8a8f87d7aad571f2c88fbc49f3ec1
| 3,636,625
|
import random
import time
def benchmark(problem_file, test_set_file):
    """ Evaluates planners with a random problem from a given problem set and world map.
    Assumes feasible paths can be calculated.

    :param problem_file: A string of map file with .map extension
    :param test_set_file: A string of problem set file with .scen extension
    :return: Returns a tuple of (results_optimal, results_random) where each element is a custom data structure
    carrying calculated path, path length and time elapsed to calculate path.
    """
    class Results(object):
        """Record of one planning run: path, its length, and wall time (ms)."""
        def __init__(self, path, path_length, time_elapsed):
            self.path = path
            self.path_length = path_length
            self.time_elapsed = time_elapsed
    world = tools.read_world_file(problem_file)
    # 'with' guarantees the scenario file is closed (the original leaked the
    # file handle by never calling close()).
    with open(test_set_file, 'r') as f:
        problems = f.readlines()
    # Pick random problem; index starts at 1 to skip the .scen header line.
    problem_str = problems[random.randint(1, len(problems) - 1)].split()
    # Parse problem string: columns 4-7 are start-x, start-y, goal-x, goal-y
    # (stored as (row, col), hence the swapped order).
    start_pose = int(problem_str[5]), int(problem_str[4])
    goal_pose = int(problem_str[7]), int(problem_str[6])
    # Evaluate optimal planner
    t = time.time()
    path = algorithms.planner_optimal(world, start_pose, goal_pose)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_optimal = Results(path, tools.path_length(path), time_ms)
    # Evaluate random planner
    t = time.time()
    path = algorithms.planner_random(world, start_pose, goal_pose, max_step_number=100000)
    time_ms = tools.sec_to_ms((time.time() - t))
    results_random = Results(path, tools.path_length(path), time_ms)
    return results_optimal, results_random
|
dcb2bc13dc0a6ecaf1b16d69bd166fd2fb1d8ffc
| 3,636,627
|
def _recursive_pairwise_outer_join(
    dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle
):
    """
    Schedule the merging of a list of dataframes in a pairwise method. This is a recursive function that results
    in a much more efficient scheduling of merges than a simple loop
    from:
    [A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]
    to:
    [A] [B] [C] [D] -> [AB] [CD] -> [ABCD]
    Note that either way, n-1 merges are still required, but using a pairwise reduction it can be completed in parallel.
    :param dataframes_to_merge: A list of Dask dataframes to be merged together on their index
    :param on: column(s) to join on, forwarded to DataFrame.join
    :param lsuffix: suffix for overlapping left-side column names
    :param rsuffix: suffix for overlapping right-side column names
    :param npartitions: partition count forwarded to the join
    :param shuffle: shuffle method forwarded to the join
    :return: A single Dask Dataframe, comprised of the pairwise-merges of all provided dataframes
    """
    number_of_dataframes_to_merge = len(dataframes_to_merge)
    # Bundle the join parameters once; passed as **kwargs both to .join and
    # to the recursive calls (whose keyword names match this function's
    # positional parameters).
    merge_options = {
        "on": on,
        "lsuffix": lsuffix,
        "rsuffix": rsuffix,
        "npartitions": npartitions,
        "shuffle": shuffle,
    }
    # Base case 1: just return the provided dataframe and merge with `left`
    if number_of_dataframes_to_merge == 1:
        return dataframes_to_merge[0]
    # Base case 2: merge the two provided dataframe to be merged with `left`
    if number_of_dataframes_to_merge == 2:
        merged_ddf = dataframes_to_merge[0].join(
            dataframes_to_merge[1], how="outer", **merge_options
        )
        return merged_ddf
    # Recursive case: split the list of dfs into two ~even sizes and continue down
    else:
        middle_index = number_of_dataframes_to_merge // 2
        merged_ddf = _recursive_pairwise_outer_join(
            [
                _recursive_pairwise_outer_join(
                    dataframes_to_merge[:middle_index], **merge_options
                ),
                _recursive_pairwise_outer_join(
                    dataframes_to_merge[middle_index:], **merge_options
                ),
            ],
            **merge_options,
        )
        return merged_ddf
|
7d65d01cce313ed0517fd685045978dee6d7cb08
| 3,636,628
|
def signum(x):
    """Return the sign of x as a float.

    :param x: a numeric value
    :return: 1.0 if x > 0, -1.0 if x < 0, 0.0 otherwise.

    The original fell through and implicitly returned None when none of the
    comparisons held (e.g. for NaN) and returned int 0 for zero; this version
    always returns a float.
    """
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    return 0.0
|
0f8e67eb8fa3267ec341d17440270ce68ca8b446
| 3,636,629
|
import re
def is_date(word):
    """
    is_date()
    Purpose: Checks if word is a date.
    @param word. A string.
    @return the matched object if it is a date, otherwise None.
    >>> is_date('2015-03-1') is not None
    True
    >>> is_date('2014-02-19') is not None
    True
    >>> is_date('03-27-1995') is not None
    True
    >>> is_date('201') is not None
    False
    >>> is_date('0') is not None
    False
    """
    date_pattern = re.compile(
        r'^(\d\d\d\d-\d\d-\d'        # e.g. 2015-03-1
        r'|\d\d?-\d\d?-\d\d\d\d?'    # e.g. 03-27-1995
        r'|\d\d\d\d-\d\d?-\d\d?)$'   # e.g. 2014-02-19
    )
    return date_pattern.search(word)
|
004bef4ac50f3ebd859cb35086c6e820f4c6e231
| 3,636,630
|
import torch
def test_epoch(model, base_dist, test_loader, epoch,
               device=None, annealing=False):
    """Calculate validation loss.
    Args:
        model: instance of CVAE
        base_dist: r1(z) prior distribution
        test_loader: instance of pytorch DataLoader yielding (h, x) batches
        epoch: current epoch number (used by the KL annealing schedule)
        device: device to use
        annealing: whether to anneal the KL loss
    Returns:
        tuple of (average total loss, average reconstruction loss,
        average KL loss) over test_loader
    """
    # KL weight annealing. This is needed to avoid posterior collapse.
    if annealing:
        kl_weight = torch.tensor(
            kl_weight_schedule(epoch, quiet=True)).to(device)
    else:
        kl_weight = torch.tensor(1.0).to(device)
    with torch.no_grad():
        model.eval()
        total_reconstruction_loss = 0.0
        total_kl_loss = 0.0
        for h, x in test_loader:
            if device is not None:
                h = h.to(device, non_blocking=True)
                x = x.to(device, non_blocking=True)
            # Sample a noise realization (unit-variance Gaussian added to h).
            y = h + torch.randn_like(h)
            reconstruction_loss, kl_loss = model(x, y, base_dist)
            # Keep track of total of each loss
            total_reconstruction_loss += reconstruction_loss.sum()
            total_kl_loss += kl_loss.sum()
        # Averages are per-sample (divide by dataset size, not batch count).
        avg_reconstruction_loss = (total_reconstruction_loss.item() /
                                   len(test_loader.dataset))
        avg_kl_loss = (total_kl_loss.item() /
                       len(test_loader.dataset))
        avg_loss = avg_reconstruction_loss + kl_weight.item() * avg_kl_loss
        print('Test set: Average Loss: {:.4f}\t=\t'
              'Reconstruction loss: {:.4f}\t +'
              '\t (KL weight) * KL loss: {:.4f}\n'.format(
                  avg_loss,
                  avg_reconstruction_loss, avg_kl_loss))
    return avg_loss, avg_reconstruction_loss, avg_kl_loss
|
36a5d2fc9d229dca98f1a36f588436ab60a92754
| 3,636,631
|
from datetime import datetime
def get_rest_value_from_path(status, device_class, path: str):
    """Parser for a REST path from a device status dict.

    :param status: device status mapping
    :param device_class: sensor device class; timestamp values are converted
        from an uptime in seconds to an ISO-8601 boot time
    :param path: plain key, or "outer/inner" for one level of nesting
    :return: the extracted (possibly post-processed) attribute value
    """
    if "/" not in path:
        attribute_value = status[path]
    else:
        # Split once instead of re-splitting the path for each index.
        parts = path.split("/")
        attribute_value = status[parts[0]][parts[1]]
    if device_class == DEVICE_CLASS_TIMESTAMP:
        # Value is an uptime in seconds; convert to the boot timestamp.
        last_boot = datetime.utcnow() - timedelta(seconds=attribute_value)
        attribute_value = last_boot.replace(microsecond=0).isoformat()
    if "new_version" in path:
        # Firmware strings look like "<date>/<version>@<commit>" — keep the
        # version part only (assumed format; TODO confirm against the API).
        attribute_value = attribute_value.split("/")[1].split("@")[0]
    return attribute_value
|
bae16743c389a8b4bd760b458f187bc4130970ca
| 3,636,632
|
def submit(request):
    """ View for the submit page.

    For an authenticated user, lists their GitHub repositories using the
    OAuth token stored by social-auth; anonymous users are redirected into
    the GitHub login flow.
    """
    if request.user.is_active:
        # Look up the user's linked GitHub account and its stored OAuth token.
        user = UserSocialAuth.objects.filter(provider='github').get(user_id=request.user.id)
        github = Github(user.tokens[u'access_token'])
        # Materialize the paginated repo iterator before rendering.
        repos = [repo for repo in github.get_user().get_repos()]
        return render(request, 'base/submit.html', {'repos': repos})
    else:
        return HttpResponseRedirect(reverse('socialauth_begin', args=('github',)))
|
ec6c1107547b444923bab904ce79c848228320be
| 3,636,633
|
def all_combined_threshold(input_image):
    """
    Apply all thresholds to undistorted image
    :param input_image: Undistorted image
    :return: Combined binary image
    Ref: Course notes
    """
    # Apply Gausiian blur to the input image
    kernel_size = 5
    input_image = cv2.GaussianBlur(input_image, (kernel_size, kernel_size), 0)
    # Sobel kernel size
    ksize = 5  # Should be an odd number to smooth a gradient
    # Apply threshold functions
    absolute_binaryX = absolute_sobel_threshold(input_image, orient='x', thresh=(20, 255))  # thresh=(30, 255)
    absolute_binaryY = absolute_sobel_threshold(input_image, orient='y', thresh=(30, 255))  # thresh=(30, 255)
    magnitude_binary = magnitude_threshold(input_image, sobel_kernel=ksize, mag_thresh=(70, 255))  # mag_thresh=(60, 255)
    direction_binary = direction_threshold(input_image, sobel_kernel=ksize, thresh=(0.7, 1.3))
    # Combine the thresholds: X-gradient, OR (Y-gradient AND direction),
    # OR magnitude. The original expression was missing parentheses around
    # `absolute_binaryX == 1` and `magnitude_binary == 1`; since `|` binds
    # tighter than `==`, it parsed as `X == (1 | mask)` and silently dropped
    # the Y-and-direction term.
    combine_all_binary = np.zeros_like(direction_binary)
    combine_all_binary[((absolute_binaryX == 1) | ((absolute_binaryY == 1)
                        & (direction_binary == 1))) | (magnitude_binary == 1)] = 1
    return combine_all_binary
|
638ec26ffd08ccc24bd5dea288cae55466d50c67
| 3,636,634
|
def sbol_cds (ax, type, num, start, end, prev_end, scale, linewidth, opts):
    """ Built-in SBOL coding sequence renderer.

    Draws an arrow-shaped CDS glyph on the matplotlib axes `ax`, laid out
    immediately after `prev_end` along the x axis. `start > end` encodes a
    reverse-strand part (arrow points left). `opts` is an optional dict of
    rendering overrides (color, hatch, paddings, extents, arrowhead size,
    linewidth, scale, label). Returns the (start, end) x-extent actually
    occupied, ordered so callers can chain the next part.
    """
    # Default options
    color = (0.7,0.7,0.7)
    hatch = ''
    start_pad = 1.0
    end_pad = 1.0
    y_extent = 5
    x_extent = 30
    arrowhead_height = 4
    arrowhead_length = 8
    # Reset defaults if provided
    if opts != None:
        if 'color' in opts.keys():
            color = opts['color']
        if 'hatch' in opts.keys():
            hatch = opts['hatch']
        if 'start_pad' in opts.keys():
            start_pad = opts['start_pad']
        if 'end_pad' in opts.keys():
            end_pad = opts['end_pad']
        if 'y_extent' in opts.keys():
            y_extent = opts['y_extent']
        if 'x_extent' in opts.keys():
            x_extent = opts['x_extent']
        if 'arrowhead_height' in opts.keys():
            arrowhead_height = opts['arrowhead_height']
        if 'arrowhead_length' in opts.keys():
            arrowhead_length = opts['arrowhead_length']
        if 'linewidth' in opts.keys():
            linewidth = opts['linewidth']
        if 'scale' in opts.keys():
            scale = opts['scale']
    # Check direction add start padding. dir_fac flips the arrowhead for
    # reverse-strand parts (start > end on input).
    dir_fac = 1.0
    final_end = end
    final_start = prev_end
    if start > end:
        dir_fac = -1.0
        start = prev_end+end_pad+x_extent
        end = prev_end+end_pad
        final_end = start+start_pad
    else:
        start = prev_end+start_pad
        end = start+x_extent
        final_end = end+end_pad
    # Draw the CDS symbol: a rectangle body with a triangular arrowhead,
    # traced as a single 7-vertex polygon.
    p1 = Polygon([(start, y_extent),
                  (start, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent),
                  (end-dir_fac*arrowhead_length, -y_extent-arrowhead_height),
                  (end, 0),
                  (end-dir_fac*arrowhead_length, y_extent+arrowhead_height),
                  (end-dir_fac*arrowhead_length, y_extent)],
                 edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
                 hatch=hatch, zorder=11,
                 path_effects=[Stroke(joinstyle="miter")]) # This is a work around for matplotlib < 1.4.0
    ax.add_patch(p1)
    # Optional centered label, placed at the glyph's midpoint.
    if opts != None and 'label' in opts.keys():
        if final_start > final_end:
            write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
        else:
            write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
    if final_start > final_end:
        return prev_end, final_start
    else:
        return prev_end, final_end
|
5647edc622c23445c6e3d8cae5f602f2a5167516
| 3,636,636
|
def convert_halo_to_array_form(halo, ndim):
"""
Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
of :samp:`int` or :samp:`({ndim}, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
:type ndim: :obj:`int`
:param ndim: Number of dimensions.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.
Examples::
>>> convert_halo_to_array_form(halo=2, ndim=4)
array([[2, 2],
[2, 2],
[2, 2],
[2, 2]])
>>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
array([[0, 0],
[1, 1],
[2, 2]])
>>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
array([[0, 1],
[2, 3],
[3, 4]])
"""
dtyp = _np.int64
if halo is None:
halo = _np.zeros((ndim, 2), dtype=dtyp)
elif is_scalar(halo):
halo = _np.zeros((ndim, 2), dtype=dtyp) + halo
elif (ndim == 1) and (_np.array(halo).shape == (2,)):
halo = _np.array([halo, ], copy=True, dtype=dtyp)
elif len(_np.array(halo).shape) == 1:
halo = _np.array([halo, halo], dtype=dtyp).T.copy()
else:
halo = _np.array(halo, copy=True, dtype=dtyp)
if halo.shape[0] != ndim:
raise ValueError(
"Got halo.shape=%s, expecting halo.shape=(%s, 2)"
%
(halo.shape, ndim)
)
return halo
|
ddf718d16ce6a13f48b1032988ec0b0a43aa2b47
| 3,636,637
|
def tran_canny(image):
    """Suppress noise with a Gaussian blur, then run Canny edge detection."""
    # 3x3 kernel, sigma=0 lets OpenCV derive sigma from the kernel size.
    image = cv2.GaussianBlur(image, (3, 3), 0)
    # Hysteresis thresholds 50 (weak) / 150 (strong).
    return cv2.Canny(image, 50, 150)
|
d9c308b43a25e714a8ddd66bdc4a700c6ec926f0
| 3,636,638
|
import math
def draw_star(img_size, num_frames, bg_config, nb_branches=6):
    """ Draw a moving, rotating star and output the interest points.

    Parameters:
      img_size: (height, width) of the generated frames
      num_frames: number of animation frames to generate
      bg_config: background configuration (forwarded to regeneration)
      nb_branches: upper bound on the number of branches of the star

    Returns:
      (images, points, events) — the frames, the per-frame in-image star
      vertices, and the simulated event stream between consecutive frames.
    """
    images = generate_background(img_size, num_frames=num_frames)
    background_color = int(np.mean(images))
    num_branches = random_state.randint(3, nb_branches)
    min_dim = min(img_size[0], img_size[1])
    thickness = random_state.randint(min_dim * 0.01, min_dim * 0.02)
    rad = max(random_state.rand() * min_dim / 2, min_dim / 5)
    x = random_state.randint(rad, img_size[1] - rad)  # select the center of a circle
    y = random_state.randint(rad, img_size[0] - rad)
    # Sample num_branches points inside the circle: one random angle per
    # angular slice so branches never overlap.
    slices = np.linspace(0, 2 * math.pi, num_branches + 1)
    angles = [slices[j] + random_state.rand() * (slices[j+1] - slices[j])
              for j in range(num_branches)]
    points = np.array([[int(x + max(random_state.rand(), 0.3) * rad * math.cos(a)),
                        int(y + max(random_state.rand(), 0.3) * rad * math.sin(a))]
                       for a in angles])
    points = np.concatenate(([[x, y]], points), axis=0)
    color = get_random_color(background_color)
    rotation = get_random_rotation()
    speed = get_random_speed()
    pts_list = []
    img_list = []
    for i in range(num_frames):
        img = images[i]
        # `np.int` was removed in NumPy 1.24; the builtin int is equivalent.
        pts = np.empty((0, 2), dtype=int)
        center = (points[0][0], points[0][1])
        # Rotate the star about its center, then translate it by `speed`.
        points = (np.matmul(points - center, rotation) + center + speed).astype(int)
        for j in range(1, num_branches + 1):
            cv.line(img, (points[0][0], points[0][1]),
                    (points[j][0], points[j][1]),
                    int(color[j]), thickness)
        # Keep only the points inside the image
        pts = keep_points_inside(points, img_size)
        if len(pts) == 0:
            # Star drifted fully off-image: regenerate from scratch and
            # return that result. (The original made this recursive call but
            # discarded its return value, continuing with the bad frames.)
            return draw_star(img_size, num_frames, bg_config, nb_branches)
        pts_list.append(pts)
        img_list.append(img)
    images = np.array(img_list)
    points = np.array(pts_list)
    event_sim = es.Event_simulator(images[0], 0)
    events = np.array([event_sim.simulate(img, 0) for img in images[1:]])
    return images, points, events
|
bec71099fae94f43af14327ab8c5549586f3bab2
| 3,636,639
|
def forestvar(z_in):
    """ Return intrinsic variance of LyaF variance for weighting. This
    estimate is roughly from McDonald et al 2006

    Parameters
    ----------
    z_in : float or ndarray
        Redshift(s) at which to evaluate the variance.

    Returns
    -------
    fvar : float or ndarray
        Variance, scaling as ((1+z)/3.25)**3.8 and anchored to 0.065
        at z = 2.25.
    """
    # (1 + 2.25) folded into the constant 3.25; broadcasting handles arrays.
    return 0.065 * ((1.0 + z_in) / 3.25) ** 3.8
|
d3523510ee29b0cc12138da93001635f5ffe6a11
| 3,636,640
|
def _process_image_file(fobj):
    """Re-encode a dataset image file as JPEG.

    Some files actually contain GIF, PNG or BMP data despite a .jpg
    extension, and some carry encoding options that can crash TF, so
    every image is decoded and normalized to JPEG.
    """
    decoded = _decode_image(fobj)
    return _encode_image(decoded, image_format="JPEG")
|
6e9e1e28a8e057a164b7385e87836dd280efdb9d
| 3,636,641
|
import collections
def compute_v2g_scores(reg, cisreg):
    """
    Goes through evidence and scores associations to a SNP
    Args:
    * reg: [ Regulatory_Evidence ]
    * cisreg: dict mapping Gene -> [ Cisregulatory_Evidence ]
    Returntype: dict(Gene: dict(string: float)), dict(Gene: float)

    For each gene, keeps the best (maximum) score per evidence source, plus
    VEP count/sum/mean aggregates and per-tissue GTEx scores, then combines
    them into one weighted-sum score using postgap.Globals.EVIDENCE_WEIGHTS.
    """
    intermediary_scores = dict()
    gene_scores = dict()
    for gene in cisreg:
        # defaultdict(int) so += aggregation below starts from 0.
        intermediary_scores[gene] = collections.defaultdict(int)
        seen = set()
        for evidence in cisreg[gene] + reg:
            # Keep only the maximum score per source.
            if evidence.source not in seen or float(evidence.score) > intermediary_scores[gene][evidence.source]:
                intermediary_scores[gene][evidence.source] = float(evidence.score)
                seen.add(evidence.source)
            # VEP stats
            if evidence.source == 'VEP':
                intermediary_scores[gene]['VEP_count'] += 1
                intermediary_scores[gene]['VEP_sum'] += float(evidence.score)
            if evidence.source == 'GTEx':
                # GTEx is additionally broken out per tissue.
                intermediary_scores[gene][evidence.tissue] = float(evidence.score)
        # Ad hoc bounds defined here:
        # PCHiC
        intermediary_scores[gene]['PCHiC'] = min(intermediary_scores[gene]['PCHiC'], 1)
        # VEP
        if 'VEP' in intermediary_scores[gene]:
            intermediary_scores[gene]['VEP_mean'] = intermediary_scores[gene]['VEP_sum'] / intermediary_scores[gene]['VEP_count']
        # Weighted sum over sources that have a configured weight.
        gene_scores[gene] = sum(intermediary_scores[gene][source] * postgap.Globals.EVIDENCE_WEIGHTS[source] for source in intermediary_scores[gene] if source in postgap.Globals.EVIDENCE_WEIGHTS)
    return intermediary_scores, gene_scores
|
137a17ba0dbec6ce4e3fcd661709c9a166312e2a
| 3,636,642
|
from re import T
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.

    Legacy Theano implementation: tries the cuDNN spatial batch-norm fast
    path when the input is 4D NCHW on a GPU device, and otherwise falls back
    to a pure-Theano computation.

    Returns a (normalized_tensor, mean, variance) tuple.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)
    # cuDNN spatial batch-norm only applies to <5D tensors reduced over
    # (batch, height, width) on a GPU device.
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        # Broadcast per-channel parameters to NCHW layout.
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            # cuDNN returns 1/std; convert back to variance for the API.
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            # Older Theano without dnn_batch_normalization_train: fall through
            # to the generic path below.
            pass
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    # Build a shape that is 1 along reduced axes so mean/var broadcast over x.
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
|
1993d33c8d2d5ece26d2ac804dfb0961d02d24e2
| 3,636,643
|
def ceil_to_batch_size(num, batch_size):
    """Round num up to the nearest whole multiple of batch_size.

    Parameters
    ----------
    num : int
        Number of items.
    batch_size : int
        Size of one batch.

    Returns
    -------
    int
        The smallest multiple of batch_size that is >= num.
    """
    n_batches = ceil(num / batch_size)
    return int(batch_size * n_batches)
|
69827028a856248c50e958761e4d106a304076e3
| 3,636,644
|
import torch
def quaternion_to_rotation_matrix(quaternion):
    """
    Transform a quaternion (or batch of quaternions) into 3x3 rotation matrices.

    Parameters
    ----------
    :param quaternion: quaternion(s) of shape (..., 4) as
        [scalar term, vector term] i.e. [w, x, y, z]

    Returns
    -------
    :return: rotation matrices of shape (..., 3, 3)
    """
    init_shape = list(quaternion.shape)
    # Flatten all leading batch dimensions into one.
    q = quaternion.view(-1, init_shape[-1])
    # Allocate one 3x3 matrix per *flattened* quaternion. The original used
    # quaternion.shape[0] here, which is wrong whenever the input has more
    # than one leading batch dimension (e.g. shape (B, T, 4)).
    # NOTE(review): relies on a module-level `device` — confirm it is defined.
    R = torch.zeros((q.shape[0], 3, 3), dtype=quaternion.dtype).to(device)
    for i in range(R.shape[0]):
        w, x, y, z = q[i]
        R[i] = torch.tensor([[2 * (w ** 2 + x ** 2) - 1, 2 * (x * y - w * z), 2 * (x * z + w * y)],
                             [2 * (x * y + w * z), 2 * (w ** 2 + y ** 2) - 1, 2 * (y * z - w * x)],
                             [2 * (x * z - w * y), 2 * (y * z + w * x), 2 * (w ** 2 + z ** 2) - 1]],
                            dtype=quaternion.dtype)
    # Restore the original batch shape with a trailing (3, 3).
    new_shape = init_shape[:-1]
    new_shape.append(3)
    new_shape.append(3)
    return R.view(new_shape)
|
ace8bca2b2e512b499dcc8ebac382226699262d2
| 3,636,645
|
def session_end(bot):
    """:crossed_flags: *TRPGのセッションを終わります*\n`/cc kp end`"""
    # NOTE: the docstring above doubles as the bot's user-facing command help,
    # so it is kept verbatim.
    # End a TRPG session: build a markdown table of players and their PCs.
    target_status = "pc_id"
    user_data = {}
    lst_player_data = get_lst_player_data(bot.team_id, bot.user_id, target_status)
    # Table header (column names are user-facing: Name | PC | Notes).
    msg_return = "| 名前 | PC | 備考 |\n|--|--|--|\n"
    for player_data in lst_player_data:
        name = player_data["name"]
        user_id = player_data["user_id"]
        url = player_data["user_param"]["url"]
        user_data[user_id] = {"url": url,
                              "name": name}
    lst_users_list = get_users_list(bot.token)
    for user_id, user_datum in user_data.items():
        # TODO: N+1 lookup — scans the full user list once per player; fine
        # for small parties, should be indexed by id if the roster grows.
        player_data = list(filter(lambda x: x["id"] == user_id, lst_users_list))
        if not player_data:
            # list(filter(...)) is never None, so the original `is None`
            # check could not fire and player_data[0] below raised IndexError
            # for unknown users. Skip them instead.
            continue
        pc_name = user_datum["name"]
        url = user_datum["url"]
        real_name = player_data[0]["real_name"]
        msg_return += f"| @{real_name} | [{pc_name}]({url}) | |\n"
    return msg_return, None
|
1f4bfa5d719be1e856efbea8d51df2af092dc119
| 3,636,646
|
def make_input_signature(inputs, include_tensor_ranks_only,
                         encode_variables_by_resource_id):
    """Generates an input signature representation.

    Args:
      inputs: The function inputs that need to be formed into a signature
      include_tensor_ranks_only: If Tensors should be considered by rank
      encode_variables_by_resource_id: If Variables should be considered by
        resource id

    Returns:
      An object representing the input signature
    """
    # Thin wrapper: all encoding logic lives in the C-extension function
    # pywrap_tfe.TFE_Py_EncodeArg.
    return pywrap_tfe.TFE_Py_EncodeArg(
        inputs, include_tensor_ranks_only, encode_variables_by_resource_id)
|
445adabf927614dea23131a973b12d98117d79e5
| 3,636,648
|
def _activities_from_datasets_followed_by_user_query(
    user_id: str, limit: int
) -> QActivity:
    """Return a query for all activities from datasets that user_id follows."""
    # Get a list of the datasets that the user is following.
    followed = model.UserFollowingDataset.followee_list(user_id)
    if not followed:
        # Nothing followed: return a query that can never match any row.
        return model.Session.query(Activity).filter(text("0=1"))
    # One limited per-dataset activity query each, unioned together.
    per_dataset_queries = []
    for follower in followed:
        dataset_activities = _package_activity_query(follower.object_id)
        per_dataset_queries.append(_activities_limit(dataset_activities, limit))
    return _activities_union_all(*per_dataset_queries)
|
7e7a3111515f9da625c554f283aa5d948fd45080
| 3,636,649
|
def getRowType(row):
    """Infer a type name for each cell of a row, in place.

    Each cell is replaced by one of 'none', 'int', 'double', 'date' or
    'string'.

    NOTE(review): `d` aliases `row`, so the input row IS mutated and also
    returned — callers appear to rely on that, so the aliasing is kept.
    """
    d = row
    for col, data in enumerate(row):
        try:
            if isNone(data):
                d[col] = 'none'
            else:
                num = float(data)
                d[col] = 'int' if num.is_integer() else 'double'
        except Exception:
            # Not numeric (the original used a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit) — try a date next.
            try:
                toDate(data)
                d[col] = 'date'
            except Exception:
                d[col] = 'string'
    return d
|
af0e853defb95005ece8727d58b8f58db8411afe
| 3,636,650
|
def fetch_lawschool_gpa(subset="all", usecols=[], dropcols=[],
                        numeric_only=False, dropna=False):
    """Load the Law School GPA dataset
    Note:
        By default, the data is downloaded from tempeh. See
        https://github.com/microsoft/tempeh for details.
    Args:
        subset ({'train', 'test', or 'all'}, optional): Select the dataset to
            load: 'train' for the training set, 'test' for the test set, 'all'
            for both.
        usecols (single label or list-like, optional): Feature column(s) to
            keep. All others are dropped.
        dropcols (single label or list-like, optional): Feature column(s) to
            drop.
        numeric_only (bool): Drop all non-numeric feature columns.
        dropna (bool): Drop rows with NAs.
    Returns:
        namedtuple: Tuple containing X, y, and sample_weights for the Law School
        GPA dataset accessible by index or name.
    """
    # NOTE(review): usecols/dropcols use mutable [] defaults; they are not
    # mutated here, but confirm standardize_dataset does not mutate them.
    if subset not in {'train', 'test', 'all'}:
        raise ValueError("subset must be either 'train', 'test', or 'all'; "
                         "cannot be {}".format(subset))
    # tempeh dataset object handles the download/caching.
    dataset = tc.datasets["lawschool_gpa"]()
    X_train, X_test = dataset.get_X(format=pd.DataFrame)
    y_train, y_test = dataset.get_y(format=pd.Series)
    # 'race' is the protected attribute; appended as a regular column so that
    # standardize_dataset can pick it up via prot_attr.
    A_train, A_test = dataset.get_sensitive_features(name='race',
                                                     format=pd.Series)
    all_train = pd.concat([X_train, y_train, A_train], axis=1)
    all_test = pd.concat([X_test, y_test, A_test], axis=1)
    if subset == "train":
        df = all_train
    elif subset == "test":
        df = all_test
    else:
        df = pd.concat([all_train, all_test], axis=0)
    return standardize_dataset(df, prot_attr=['race'], target='zfygpa',
                               usecols=usecols, dropcols=dropcols,
                               numeric_only=numeric_only, dropna=dropna)
|
9b01070fb62e0d28dc5961d84fac311a549e258f
| 3,636,651
|
def check_ref_exons(exon_seqs, mask_stops):
    """Validate the reference coding sequence and handle in-frame stops.

    The joined exon sequence should start with ATG and end with a stop
    codon.  When mask_stops is True, in-frame stop codons are rewritten as
    NNN (CESAR cannot handle them); otherwise the program aborts.  Returns
    the (possibly masked) per-exon sequences plus the positions of in-frame
    TGA codons, which may encode selenocysteine.
    """
    sec_codons = set()  # positions of in-frame TGA codons (possible Sec)
    gene_seq = "".join(exon_seqs[i] for i in range(len(exon_seqs.keys())))
    codons = parts(gene_seq, n=3)  # chop the sequence into triplets
    if codons[0] != "ATG":
        eprint("Input is corrupted! Reference sequence should start with ATG!")
    elif codons[-1] not in STOPS:
        eprint("Input is corrupted! Reference sequence should end with a stop codon!")
    in_frame_stops = [(n, c) for n, c in enumerate(codons[:-1]) if c in STOPS]
    if not in_frame_stops:
        # Nothing to mask; hand the exons back untouched.
        return exon_seqs, set()
    eprint("Warning! There are inframe stop codons!")
    for pos, codon in in_frame_stops:
        eprint(f"Codon num {pos + 1} - {codon}")
        if mask_stops:
            codons[pos] = "NNN"
        if codon == "TGA":
            # maybe a sec codon
            sec_codons.add(pos)
        if not mask_stops:
            eprint(">>>STOP_CODON>>>")
            die("Abort, there are inframe stop codons.", 0)
    # Re-slice the masked sequence back into the original exon boundaries.
    masked_seq = "".join(codons)
    stop_masked = {}
    offset = 0
    for num, exon_seq in exon_seqs.items():
        exon_len = len(exon_seq)
        stop_masked[num] = masked_seq[offset: offset + exon_len]
        offset += exon_len
    return stop_masked, sec_codons
|
9b7e101cb055ee561ad54beb9b2c15c43044f2fc
| 3,636,652
|
def resistancedistances(graph):
    """
    Returns the pairwise resistance distances on the given graph.
    Args:
        graph: networkx graph
    Returns:
        Dictionary of pairwise resistance distances,
        accessed by the (i,j) node labels
    """
    labels = list(graph.nodes())
    nodecount = len(labels)
    # nx.laplacian_matrix returns a SciPy sparse matrix/array, which
    # np.linalg.pinv cannot handle directly -- densify it first.
    L = np.linalg.pinv(np.asarray(nx.laplacian_matrix(graph).todense()))
    rdist = {}
    for i in range(nodecount):
        rdist[labels[i]] = {}
        for j in range(nodecount):
            # Resistance distance from the Moore-Penrose pseudo-inverse
            # of the Laplacian: r(i,j) = L+ii + L+jj - L+ij - L+ji.
            rdist[labels[i]][labels[j]] = L[i, i] + L[j, j] - L[i, j] - L[j, i]
    return rdist
|
9e3b44602a0dce55516947768ad136e47397af86
| 3,636,654
|
def flatten_to_raster(data):
    """Flatten a numpy array of images into a single RGB/gray raster.

    Accepted shapes (C must be 1 or 3):
      * H x W x C           -- a single image; returned unchanged.
      * N x Y x X x C       -- an array of images; arranged into a near-square
        grid (biased toward more columns than rows) and rasterized.
      * nY x nX x Y x X x C -- an explicit 2-D grid of images; rasterized.

    Returns:
        (flattened, nY, nX, n_states) where flattened is the raster image,
        nY/nX are the grid dimensions, and n_states is the number of images.
    """
    n_dim = len(data.shape)
    if n_dim == 3:
        # Single image: nothing to arrange, 1x1 "grid".
        return data, 1, 1, 1
    if n_dim == 4:
        # Flat stack of images: build a near-square grid, then rasterize.
        grid = image_array_to_grid(data)
        rows, cols = grid.shape[0], grid.shape[1]
        return image_grid_to_raster(grid), rows, cols, len(data)
    if n_dim == 5:
        # Already a 2-D grid of images: rasterize directly.
        rows, cols = data.shape[0], data.shape[1]
        return image_grid_to_raster(data), rows, cols, rows * cols
    raise Exception("data dimension {} not supported!".format(n_dim))
|
2cb9c12a1d3efb7885f1f4d34d0eb3fc9b4d35c0
| 3,636,655
|
def read_google(url, **kwargs):
    """Read the first sheet of a Google Sheet into a DataFrame.

    Appends the CSV export suffix to the sheet URL and forwards any extra
    keyword arguments to ``pd.read_csv``.
    """
    base = url if url.endswith('/') else url + '/'
    return pd.read_csv(base + 'export?gid=0&format=csv', **kwargs)
|
286158dc007378eef84ed048cda54a93c41dc140
| 3,636,656
|
def ts_inspect_2d(target, *preds, start_date=None, freq=None):
    """
    Builds TSMetrics for point predictions only, creating internal representation for it.
    """
    def _to_internal(series):
        # Shared conversion into the internal 2-D xarray representation.
        return xr_2d_factory(series, start_date=start_date, freq=freq)

    return TSMetrics(_to_internal(target), *map(_to_internal, preds))
|
34bd33ac53622c2b5441613fed9b8573d019dfcb
| 3,636,657
|
def adjust_age_groups(age_labels):
    """Merge consecutive pairs of age-range labels into wider ranges.

    Each pair contributes the first number of its first label and the last
    number of its second label, e.g. ["0-4", "5-10"] -> ["0-10"].  A trailing
    unpaired label is dropped.  The final group is rewritten as open-ended
    ("X+") based on its lower bound.
    """
    merged = []
    # zip over even/odd positions pairs up neighbours; an odd leftover
    # label is silently discarded, matching the original parity counter.
    for first, second in zip(age_labels[0::2], age_labels[1::2]):
        lower = first.split('-')[0]
        upper = second.split('-')[-1]
        merged.append(lower + '-' + upper)
    # Last age group becomes open-ended, keeping only its lower bound.
    merged[-1] = merged[-1].split("-")[0] + "+"
    return merged
|
521a2f6779ae8fa3f3a53801e0f935844245cffc
| 3,636,658
|
def get_kni_ports():
    """
    A KNI port is a list of string of format vEth0_%d where %d is the port index.
    """
    raw = run_local_cmd('ifconfig | grep vEth0_ | cut -d\':\' -f1 ', get_output = True)
    # Drop empty entries produced by trailing newlines in the command output.
    return {name for name in raw.split('\n') if name != ''}
|
1f04adea6c080bf5800e86dea559207048c345f3
| 3,636,659
|
import re
def parse_log(file_abspath):
    """Parse warning and error info from TRNSYS generated log file.
    Parses warning and error count when simulation ends with errors.
    If simulation ends successfully, counts number of warnings and return
    successful completion message.
    Args:
        file_abspath: absolute path to result file.
    Returns:
        dict with keys 'Message', 'Warnings' (int) and 'Errors' (int).
    Raises:
        IOError: problem reading out_file
    """
    runsumdict = {}
    # Patterns for the failure banner, the warning/error totals printed on
    # failure, and the per-warning lines printed on success.
    pat01 = re.compile(r'Simulation stopped with errors')
    pat02 = re.compile(r'Total Warnings\s+:\s+(\d+)')
    pat03 = re.compile(r'Total Fatal Errors\s+:\s+(\d+)')
    pat04 = re.compile(r'Warning at time')
    # The 'U' (universal newlines) open mode was removed in Python 3.11;
    # plain 'r' has had universal-newline behavior since Python 3.
    with open(file_abspath, 'r') as log_f:
        temp = log_f.read()
    match = pat01.search(temp)
    if match:
        # Failed run: take the totals the log itself reports.
        runsumdict['Message'] = match.group()
        match = pat02.search(temp)
        runsumdict['Warnings'] = int(match.group(1))
        match = pat03.search(temp)
        runsumdict['Errors'] = int(match.group(1))
    else:
        # Successful run: count individual warning lines ourselves.
        runsumdict['Message'] = "Simulation ended successfully"
        match = pat04.findall(temp)
        runsumdict['Warnings'] = len(match)
        runsumdict['Errors'] = 0
    return runsumdict
|
9fef07ac7a6f035b536c105d401b1f9f4413f629
| 3,636,660
|
from typing import Union
def extract_publish_info_from_issue(
    issue: "Issue", publish_type: PublishType
) -> Union[PublishInfo, MyValidationError]:
    """Extract the publish info for *publish_type* from a GitHub issue.

    Returns the parsed PublishInfo on success, or the MyValidationError
    itself when validation of the issue content fails.
    """
    try:
        # BOT and PLUGIN have dedicated parsers; everything else is
        # treated as an adapter, exactly as the original if/elif chain did.
        if publish_type == PublishType.BOT:
            parser = BotPublishInfo.from_issue
        elif publish_type == PublishType.PLUGIN:
            parser = PluginPublishInfo.from_issue
        else:
            parser = AdapterPublishInfo.from_issue
        return parser(issue)
    except MyValidationError as e:
        return e
|
bbe5a8d5e7971b335334aef8e6b158df6fa42146
| 3,636,661
|
def exploration_function(q_space, x_space, index_, action_space_n, k):
    """Return the optimistic exploration value for state *index_*.

    For each action, a visit-count bonus k/(1 + visits) is added to the
    Q-value; the maximum over all actions is returned (-inf when there
    are no actions).
    """
    bonused = (
        q_space[index_][action] + k / (1 + x_space[index_][action])
        for action in range(action_space_n)
    )
    # default mirrors the original -inf seed for an empty action space.
    return max(bonused, default=float('-inf'))
|
9c6f1aa2943436d75c9a7735b4efa2c44c8a08d1
| 3,636,663
|
def get_vectors(model_dm, model_dbow):
    """Build stacked train/test document vectors from trained doc2vec models.

    Args:
        model_dm: trained distributed-memory doc2vec model.
        model_dbow: trained distributed-bag-of-words doc2vec model.
    Returns:
        (train_vecs, test_vecs): horizontally stacked DM+DBOW vectors for
        the module-level x_train and x_test corpora.
    """
    # Document vectors for the training corpus, one block per model.
    dm_train = getVecs(model_dm, x_train, size)
    dbow_train = getVecs(model_dbow, x_train, size)
    # Document vectors for the test corpus.
    dm_test = getVecs(model_dm, x_test, size)
    dbow_test = getVecs(model_dbow, x_test, size)
    return np.hstack((dm_train, dbow_train)), np.hstack((dm_test, dbow_test))
|
9f302bccd63c43bcf685851550f553ea1283de51
| 3,636,664
|
def test_profile_queue(db, test_profile, test_project_data):
    """A queue with test data, associated with the first test profile."""
    queue = add_queue(test_project_data, TEST_QUEUE_LEN, profile=test_profile)
    return queue
|
2e23f456972a7a617be1edd86bcf990286c0638b
| 3,636,665
|
import sqlite3
def user_has_registered(userID):
    """Checks if a particular user has been registered in database.

    Args:
        userID: the user id to look up in the profile table of users.db.
    Returns:
        bool: True when a matching row exists, False otherwise.
    """
    database = sqlite3.connect("users.db")
    try:
        cursor = database.cursor()
        # Parameterized query: interpolating userID into the SQL string
        # (as before) allowed SQL injection and broke on quoted input.
        cursor.execute("SELECT user_id FROM profile WHERE user_id = ?",
                       (userID,))
        return cursor.fetchone() is not None
    finally:
        # The connection was previously leaked on every call.
        database.close()
|
e98f83b272a52828638f276575596489bebe1fcf
| 3,636,666
|
def prepare_ocp(
    biorbd_model_path: str,
    final_time: float,
    n_shooting: int,
    marker_velocity_or_displacement: str,
    marker_in_first_coordinates_system: bool,
    control_type: ControlType,
    ode_solver: OdeSolver = OdeSolver.RK4(),  # NOTE(review): default instance is created once at import time -- confirm OdeSolver.RK4() is stateless
) -> OptimalControlProgram:
    """
    Prepare an ocp that targets some marker velocities, either by finite differences or by jacobian
    Parameters
    ----------
    biorbd_model_path: str
        The path to the bioMod file
    final_time: float
        The time of the final node
    n_shooting: int
        The number of shooting points
    marker_velocity_or_displacement: str
        which type of tracking: finite difference ('disp') or by jacobian ('velo')
    marker_in_first_coordinates_system: bool
        If the marker to track should be expressed in the global or local reference frame
    control_type: ControlType
        The type of controls
    ode_solver: OdeSolver
        The ode solver to use
    Returns
    -------
    The OptimalControlProgram ready to be solved

    Raises
    ------
    RuntimeError
        If marker_velocity_or_displacement is neither 'velo' nor 'disp'.
    """
    biorbd_model = biorbd.Model(biorbd_model_path)
    # Add objective functions
    if marker_in_first_coordinates_system:
        # Marker should follow this segment (0 velocity when compare to this one)
        coordinates_system_idx = 0
    else:
        # Marker should be static in global reference frame
        coordinates_system_idx = -1
    objective_functions = ObjectiveList()
    if marker_velocity_or_displacement == "disp":
        objective_functions.add(
            ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_DISPLACEMENT,
            coordinates_system_idx=coordinates_system_idx,
            index=6,  # NOTE(review): index 6 presumably selects the tracked marker -- confirm against the .bioMod file
            weight=1000,
        )
    elif marker_velocity_or_displacement == "velo":
        objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_MARKERS_VELOCITY, index=6, weight=1000)
    else:
        raise RuntimeError(
            f"Wrong choice of marker_velocity_or_displacement, actual value is "
            f"{marker_velocity_or_displacement}, should be 'velo' or 'disp'."
        )
    # Make sure the segments actually moves (in order to test the relative speed objective)
    # Negative weights reward (rather than penalize) motion of states 6 and 7.
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=6, weight=-1)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, index=7, weight=-1)
    # Dynamics
    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.TORQUE_DRIVEN)
    # Path constraint
    nq = biorbd_model.nbQ()
    x_bounds = BoundsList()
    x_bounds.add(bounds=QAndQDotBounds(biorbd_model))
    # Loosen the velocity part of the state bounds (indices nq..2nq-1) to +/-10.
    for i in range(nq, 2 * nq):
        x_bounds[0].min[i, :] = -10
        x_bounds[0].max[i, :] = 10
    # Initial guess
    x_init = InitialGuessList()
    x_init.add([1.5, 1.5, 0.0, 0.0, 0.7, 0.7, 0.6, 0.6])
    # Define control path constraint
    tau_min, tau_max, tau_init = -100, 100, 0
    u_bounds = BoundsList()
    u_bounds.add([tau_min] * biorbd_model.nbGeneralizedTorque(), [tau_max] * biorbd_model.nbGeneralizedTorque())
    u_init = InitialGuessList()
    u_init.add([tau_init] * biorbd_model.nbGeneralizedTorque())
    return OptimalControlProgram(
        biorbd_model,
        dynamics,
        n_shooting,
        final_time,
        x_init,
        u_init,
        x_bounds,
        u_bounds,
        objective_functions,
        control_type=control_type,
        ode_solver=ode_solver,
    )
|
6a991931b7cd458611a9467e6c6c7ba4f0235150
| 3,636,667
|
def key_gen(**kwargs):
    """
    Key generator for linux. Determines key based on
    parameters supplied in kwargs.
    Keyword Parameters:
    @keyword geounit1: portable_id of a geounit
    @keyword geounit2: portable_id of a geounit
    @keyword region: region abbreviation
    """
    # A geounit pair takes precedence over a region key.
    has_pair = 'geounit1' in kwargs and 'geounit2' in kwargs
    if has_pair:
        return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'],
                                                kwargs['geounit2'])
    if 'region' in kwargs:
        return 'adj:region:%s' % kwargs['region']
    # No recognized keywords: no key can be built.
    return None
|
02426fbf49e7a4d85094896546980828e2c6bc20
| 3,636,668
|
from typing import List
from typing import Optional
def find(bindings: List[Binding], name: str) -> Optional[Binding]:
    """
    Returns a Binding with a given name. Comparison is case-insensitive.
    :param bindings: the Bindings to find in
    :param name: the name of the Binding to find
    :return: the Binding with a given name if it exists, None otherwise
    """
    wanted = name.lower()
    # First match wins; None when nothing matches.
    return next(
        (binding for binding in bindings if binding.get_name().lower() == wanted),
        None,
    )
|
b5efb45c6c9ca982ffa0949a599dc3e6b1f8a948
| 3,636,669
|
from typing import Dict
async def init_menu_perms(request: Request) -> Dict:
    """
    Initialize menus and permissions.

    Delegates entirely to the services layer and returns its result.
    """
    result = await services.init_menu_perms(request)
    return result
|
d863d89a857097434885d59b037cd4ff1cf5fe8f
| 3,636,671
|
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
    """Pre-process a single tokenized sentence into BERT model inputs.

    Wraps the tokens in [CLS]/[SEP], converts them to ids, and zero-pads
    everything to max_seq_length.  Only single sequences are supported, so
    every segment id is 0 (BERT uses segment/type ids to distinguish a
    sentence pair; the [CLS] vector serves as the sentence representation).

    Returns:
        (input_ids, input_mask, segment_ids), each of length max_seq_length.
        input_mask is 1 for real tokens and 0 for padding.
    """
    # Truncate so that [CLS] and [SEP] still fit (slicing is a no-op when
    # the sentence is already short enough).
    tokens = ["[CLS]"] + list(example_tokens[:max_seq_length - 2]) + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    # Zero-pad all three sequences up to max_seq_length.
    pad_len = max_seq_length - len(input_ids)
    input_ids = input_ids + [0] * pad_len
    input_mask = input_mask + [0] * pad_len
    segment_ids = segment_ids + [0] * pad_len
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
|
992f6ccbdcfdb4498a6aa226efee8d26844d435a
| 3,636,672
|
def weight_diff(w1, w2):
    """Return the elementwise difference between two weight collections.

    Each input is a sequence of arrays; both are flattened into single
    1-D vectors (in order) before subtracting.
    """
    flat1 = np.concatenate([layer.flatten() for layer in w1])
    flat2 = np.concatenate([layer.flatten() for layer in w2])
    return flat1 - flat2
|
0e9154aa723335a6d6d53382c67abe25523508e9
| 3,636,673
|
def advect_salinity(vs):
    """
    integrate salinity

    Advects the current-timestep salinity field via the generic tracer
    advection routine.
    """
    current_salt = vs.salt[..., vs.tau]
    salt_tendency = vs.dsalt[..., vs.tau]
    return advect_tracer(vs, current_salt, salt_tendency)
|
b760f4bc9144db1ea1bc6a80c075f392a4c0acb1
| 3,636,675
|
import math
def lafferty_wyatt_point(lowedge, highedge, expo_slope):
    """Return the Lafferty-Wyatt point of a bin under an exponential pdf.

    The L-W point is where the true exponential (with the given slope)
    equals its average value over [lowedge, highedge].
    """
    # Average of exp(slope * x) over the bin.
    bin_mean = math.exp(expo_slope * highedge) - math.exp(expo_slope * lowedge)
    bin_mean /= expo_slope
    bin_mean /= highedge - lowedge
    # Invert the exponential at that average value.
    return math.log(bin_mean) / expo_slope
|
326acddc1926f1a142f34e8cff9109554ec850d3
| 3,636,676
|
def init_critical_cases_20():
    """Constant Vensim component ``init Critical Cases 20``.

    Original equation: ``0``; units: person; limits: (None, None);
    type: constant.  Always returns the constant value 0.
    """
    return 0
|
735a4df9c2ee5777c7c15dcf4fb4a3830cb5e0b6
| 3,636,677
|
def build_doc(pic_dic):
    """Build a Word document from {'image-name': ['image-path', text]} entries.

    Produces a title page, then one page per image: heading, picture,
    caption paragraph, page break.  Returns the document object.
    """
    doc = word_obj()
    # Centered footer that carries an automatic page number.
    footer_par = doc.sections[0].footer.paragraphs[0]
    footer_par.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
    add_page_number(footer_par.add_run())
    # Opening/title page.
    starter_page(doc)
    for pic_name, pic_details in pic_dic.items():
        head = doc.add_heading(pic_name, 6)
        heading(head, doc)
        doc.add_picture(pic_details[0], width=Inches(6), height=Inches(4))
        caption = doc.add_paragraph(pic_details[1])
        paragraph(caption, doc)
        doc.add_page_break()
    return doc
|
1172f4b83e678fd2b363914c374667059af6ccbb
| 3,636,678
|
def dispatch_every_hour(one_time_password):
    """Receiving point of start_every_hour's POST request.

    Validates the one-time password (check_password raises/aborts on a
    mismatch), then launches every_hour in a background process.
    """
    EveryHourOTP.check_password(one_time_password)
    worker = Process(target=every_hour)
    worker.start()
    return "success"
|
e03f6bccbfb76fd25572bebde78151832b994928
| 3,636,679
|
def check_list(data):
    """Return *data* unchanged if it is a list; otherwise wrap it as [data]."""
    # isinstance (rather than an exact type() comparison) also accepts
    # list subclasses, which behave like lists for every caller.
    return data if isinstance(data, list) else [data]
|
00ae7a857c3f969ca435928edf98ed5bb36c1c34
| 3,636,680
|
def nfvi_reinitialize(config):
    """
    Re-initialize the NFVI package

    Re-runs compute-plugin initialization unless the plugin is disabled
    in *config*; returns True when nothing needed (re)initializing.
    """
    global _task_worker_pools
    disabled = config.get('compute_plugin_disabled', 'False') in DISABLED_LIST
    if disabled:
        # Nothing to re-initialize; report success.
        return True
    return nfvi_compute_initialize(config, _task_worker_pools['compute'])
|
a8f76bf228b94fadc18dbe0e97a21109ee607935
| 3,636,681
|
import re
def parse_freqs(lines, parameters):
    """Parse the basepair frequencies.

    Handles the several output layouts used by different baseml versions
    (4.1, 4.3, 4.4) plus branch-specific frequency blocks.  Relies on the
    module-level ``line_floats_re`` to extract floats from each line.
    Updates and returns *parameters*.
    """
    # Raw strings throughout: "\d" / "\#" in plain strings are invalid
    # escape sequences (SyntaxWarning on modern Python).
    root_re = re.compile(r"Note: node (\d+) is root.")
    node_re = re.compile(r"Node \#(\d+)")
    branch_freqs_found = False
    base_freqs_found = False
    for line in lines:
        # Find all floating point numbers in this line
        line_floats_res = line_floats_re.findall(line)
        line_floats = [float(val) for val in line_floats_res]
        # Find base frequencies from baseml 4.3
        # Example match:
        # "Base frequencies:  0.20090  0.16306  0.37027  0.26577"
        if "Base frequencies" in line and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
        # Find base frequencies from baseml  4.1:
        # Example match:
        # "base frequency parameters
        # "  0.20317  0.16768  0.36813  0.26102"
        elif "base frequency parameters" in line:
            base_freqs_found = True
        # baseml 4.4 returns to having the base frequencies on the next line
        # but the heading changed
        elif "Base frequencies" in line and not line_floats:
            base_freqs_found = True
        elif base_freqs_found and line_floats:
            base_frequencies = {}
            base_frequencies["T"] = line_floats[0]
            base_frequencies["C"] = line_floats[1]
            base_frequencies["A"] = line_floats[2]
            base_frequencies["G"] = line_floats[3]
            parameters["base frequencies"] = base_frequencies
            base_freqs_found = False
        # Find frequencies
        # Example match:
        # "freq:   0.90121  0.96051  0.99831  1.03711  1.10287"
        elif "freq: " in line and line_floats:
            parameters["rate frequencies"] = line_floats
        # Find branch-specific frequency parameters
        # Example match (note: I think it's possible to have 4 more
        # values per line, enclosed in brackets, so I'll account for
        # this):
        # (frequency parameters for branches)  [frequencies at nodes] (see Yang & Roberts 1995 fig 1)
        #
        # Node #1  ( 0.25824  0.24176  0.25824  0.24176 )
        # Node #2  ( 0.00000  0.50000  0.00000  0.50000 )
        elif "(frequency parameters for branches)" in line:
            parameters["nodes"] = {}
            branch_freqs_found = True
        elif branch_freqs_found:
            if line_floats:
                node_res = node_re.match(line)
                node_num = int(node_res.group(1))
                node = {"root": False}
                node["frequency parameters"] = line_floats[:4]
                if len(line_floats) > 4:
                    node["base frequencies"] = {"T": line_floats[4],
                                                "C": line_floats[5],
                                                "A": line_floats[6],
                                                "G": line_floats[7]}
                parameters["nodes"][node_num] = node
            else:
                root_res = root_re.match(line)
                if root_res is not None:
                    root_node = int(root_res.group(1))
                    parameters["nodes"][root_node]["root"] = True
                    branch_freqs_found = False
    return parameters
|
b0940b15aba9387e9257fd47bd5cbbd8dbf821ea
| 3,636,682
|
def student2nation(id_num):
    """
    Takes student id, returns nation id of the student.
    """
    # A student's nation is defined by the school the id maps to.
    return school2nation(id_num)
|
9453a6b2b9f31bbeb6c319cc450a757f3c8585b0
| 3,636,683
|
def get_projects_with_builds(only_public=True, only_active_versions=True):
    """Returns a queryset of Projects with active only public by default builds.

    Args:
        only_public: restrict to builds of versions with public privacy level.
        only_active_versions: restrict to builds of active versions.
    """
    # Only the version-independent conditions live in the base queryset.
    # Previously version__active=True was also hard-coded here, which made
    # the only_active_versions parameter a no-op.
    builds = Build.objects.filter(
        success=True,
        state='finished',
    )
    if only_public:
        builds = builds.filter(version__privacy_level='public',)
    if only_active_versions:
        builds = builds.filter(version__active=True)
    filtered_projects = builds.values_list(
        'project',
        flat=True
    )
    return Project.objects.filter(
        pk__in=filtered_projects
    )
|
e1ae057fa983741fb291ac3c47cc5be628fe1711
| 3,636,684
|
import base64
def encode_base64(filename):
    """Read a file and return its contents as a base64 text string.

    Args:
        filename: path of the (image) file to encode.
    Returns:
        str: base64-encoded file contents.
    """
    with open(filename, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode()
|
9eab28ec1cb9619411ea28a9640a2fa8b02e61a3
| 3,636,685
|
def transfer_from_taoyuanagrichannel_to_taoyuanagriwaterdemand():
    """Vensim component ``Transfer From TaoYuanAgriChannel To TaoYuanAgriWaterDemand``.

    Original equation:
        (Transfer From ShiMenReservoir To HouChiWeir
         * Ratio AgriWater ShiMenReservoir To HouChiWeir In TaoYuanAgriChannel)
        * (1 - Channel Transfer Loss Rate)
    Units: m3; limits: (None, None); type: component; subs: None.
    """
    # Agricultural share of the reservoir-to-weir transfer...
    agri_share = (
        transfer_from_shimenreservoir_to_houchiweir()
        * ratio_agriwater_shimenreservoir_to_houchiweir_in_taoyuanagrichannel()
    )
    # ...reduced by channel conveyance losses.
    return agri_share * (1 - channel_transfer_loss_rate())
|
7bd77945002ed9217485dcf1f412cf333c666e30
| 3,636,686
|
def Doxyfile_emitter(target, source, env):
    """
    Modify the target and source lists to use the defaults if nothing
    else has been specified.
    Dependencies on external HTML documentation references are also
    appended to the source list.
    """
    # The Doxyfile template becomes the first source dependency.
    template = env.File(env['DOXYFILE_FILE'])
    source.insert(0, template)
    return target, source
|
41928a8c837d7eb00d6b4a4a2f407e2d75217620
| 3,636,687
|
def _recursive_make_immutable(o):
    """Recursively convert an object into an immutable (hashable) form.

    Lists become frozensets and dicts become frozendicts so the results can
    be compared directly and used as keys in dicts / `networkx` DAG nodes.
    Dicts whose ``id`` key equals ``cdev_cloud_output`` are Cloud Output
    descriptors; their ``output_operations`` payload (when present) is run
    through ``_load_cloud_output_operations`` first.  Any other value is
    returned unchanged.

    Intended only for resource-state loading, so structural errors are
    allowed to propagate up to ``load_resource_state``.
    """
    if isinstance(o, list):
        return frozenset(_recursive_make_immutable(item) for item in o)
    if not isinstance(o, dict):
        # Scalars (and anything else) pass through untouched.
        return o
    converted = {key: _recursive_make_immutable(value) for key, value in o.items()}
    if o.get("id") == "cdev_cloud_output":
        # Special case: Cloud Output descriptors need their operations
        # deserialized before freezing.
        operations = o.get("output_operations")
        if operations:
            converted["output_operations"] = _load_cloud_output_operations(
                operations
            )
    return frozendict(converted)
|
270dc2aaa07edf2f0f57aa298ea0f619e7412b80
| 3,636,688
|
def retry_condition(exception):
    """Return True if we should retry (when it's an HTTPError or AttributeError), False otherwise.

    The previous docstring claimed IOError triggered a retry, which did not
    match the implemented check.
    """
    if isinstance(exception, (HTTPError, AttributeError)):
        print(f'HTTP error occurred: {exception}')  # Python 3.6
        return True
    return False
|
c6de8b160c071ed8055ed4dd1268ac97958166dd
| 3,636,689
|
def mergesort(input_arr):
    """Sort *input_arr* in ascending order with recursive merge sort.

    Time complexity O(n log n), space complexity O(n).  Relies on the
    module-level ``_merge`` helper to combine two sorted halves.

    Args:
        input_arr (array): numbers to sort.
    Returns:
        array: the numbers in ascending order.
    """
    size = len(input_arr)
    # Zero or one element is already sorted.
    if size <= 1:
        return input_arr
    half = size // 2
    sorted_left = mergesort(input_arr[:half])
    sorted_right = mergesort(input_arr[half:])
    return _merge(sorted_left, sorted_right)
|
433348035ea2bc41aef11dc3eaa8c51d16fffc81
| 3,636,690
|
def _clean_annotated_text(text):
"""Cleans text from the format that it was presented to annotators in the
S.M.A.R.T data annotation tool. Splits the title from the abstract text
and strips any trailing whitespace.
Returns:
title (str): The project title
text (str): The project abstract
"""
text = text.split('=====')
title = text[1].strip()
abstract = text[-1].strip()
return title, abstract
|
356cdf893225c41d303e83f1cf2f3418544c76ae
| 3,636,691
|
def get_or_create_event_loop():
    """
    Tries to get the current event loop. If not found creates a new one.
    Returns
    -------
    event_loop : ``EventThread``
    """
    try:
        return get_event_loop()
    except RuntimeError:
        # No running loop in this context; spin up a non-daemon one.
        return create_event_loop(daemon=False)
|
1dacd2172a0bffd2e5632ec48b90a1c1ee31800d
| 3,636,692
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.